diff --git "a/5332.jsonl" "b/5332.jsonl"
new file mode 100644
--- /dev/null
+++ "b/5332.jsonl"
@@ -0,0 +1,705 @@
+{"seq_id":"646346923","text":"#!/usr/bin/env python3\nimport os\nimport re\nimport pickle\nimport warnings\nimport numpy as np\nimport ROOT\nimport yaml\nimport argparse\n\nHE_3_MASS = 2.809230089\nct_bins = np.array([1, 2, 4, 6, 8, 10, 14, 18, 23, 35], dtype=float)\ncent_bins = [0, 90]\n##################################################################\n\nif not os.path.isdir('../Results/2Body/absorption_correction'):\n os.mkdir('../Results/2Body/absorption_correction')\n\nsplit_list = ['antimatter', 'matter']\n\n# mc input file\nmc_file = '/data/fmazzasc/PbPb_2body/absorption_studies/AnalysisResults_extr.root'\noutfile = ROOT.TFile(\"../Results/2Body/He3_abs_extr.root\", \"recreate\")\n\n##################################################################\n# functions\nfunc = {}\nfunc_max = {}\n\nfunc_names = [\"BGBW\", \"Boltzmann\", \"Mt-exp\", \"Pt-exp\", \"LevyTsallis\"]\n\n# functions input files\ninput_func_file = ROOT.TFile(\"../Utils/Anti_fits.root\")\n\n# get functions and maxima from file\nfor i_fun, _ in enumerate(func_names):\n key = f\"cent_{cent_bins[0]}_{cent_bins[1]}_func_{func_names[i_fun]}\"\n func[key] = input_func_file.Get(\n f\"{func_names[i_fun]}/{0}/{func_names[i_fun]}{0}\")\n func_max[key] = func[key].GetMaximum()\n\n# book histograms\nh_abs_radius = {}\nh_abs_ct = {}\nh_gen_radius = {}\nh_gen_ct = {}\nh_rec_radius = {}\nh_rec_ct = {}\n\nfor key in func.keys():\n for split in (split_list + ['all']):\n h_abs_radius[f\"{split}_\" + key] = ROOT.TH1D(\n f\"fAbsRadius_{split}_{cent_bins[0]}_\" + key, \";#it{R}_{#it{abs}} (cm);Entries\", 1000, 0, 1000)\n h_abs_ct[f\"{split}_\" + key] = ROOT.TH1D(\n f\"fAbsCt_{split}_\" + key, \";#it{c}t (cm);Entries\", 2000, 1, 1000)\n h_gen_radius[f\"{split}_\" + key] = ROOT.TH1D(\n f\"fGenRadius_{split}_\" + key, \";#it{R}_{#it{abs}} (cm);Entries\", 1000, 0, 1000)\n h_gen_ct[f\"{split}_\" + key] = ROOT.TH1D(\n f\"fGenCt_{split}_\" + key, \";#it{c}t (cm);Entries\", len(ct_bins) - 1, ct_bins)\n h_rec_radius[f\"{split}_\" + key] = ROOT.TH1D(\n f\"fRecRadius_{split}_\" + key, \";#it{R}_{#it{abs}} (cm);Entries\", 1000, 0, 1000)\n h_rec_ct[f\"{split}_\" + key] = ROOT.TH1D(\n f\"fRecCt_{split}_\" + key, \";#it{c}t (cm);Entries\", len(ct_bins) - 1, ct_bins)\n\n\n# read tree\ndata_frame_he3 = ROOT.RDataFrame('STree', mc_file)\ndata_frame_he3 = data_frame_he3.Range(0, int(1e7))\ndata_frame_he3 = data_frame_he3.Filter(\n 'pt > 2. and pt < 10. and (flag & 1)==1')\nnp_he3 = data_frame_he3.AsNumpy([\"pt\", \"pdg\", \"absCt\", \"eta\"])\n\n# analysis in centrality classes\ncounter = 0\nprint_step_index = 0\nnum_entries = len(np_he3[\"pt\"])\nprint_steps = num_entries*np.arange(0, 1, 0.01)\n\n\nfor he3 in zip(np_he3['pt'], np_he3['pdg'], np_he3['absCt'], np_he3['eta']):\n\n # if counter > 10000:\n # break\n if np.floor(counter/num_entries*100) < 99:\n if counter > print_steps[print_step_index]:\n print(\"Loading.... : \", np.floor(counter/num_entries*100), \" %\")\n print_step_index += 1\n\n split = \"antimatter\"\n if he3[1] == 1000020030:\n split = \"matter\"\n absCt = he3[2]\n key_counter = 0\n\n for key in func.keys():\n\n if key_counter == 0:\n key_counter += 1\n # rejection sampling to reweight pt\n # if ROOT.gRandom.Rndm()*func_max[key] > func[key].Eval(he3[0]):\n # continue\n # sample decay ct and check for absorption\n decCt = ROOT.gRandom.Exp(7.6)\n # polar angle from eta\n tmp = abs(he3[3])\n tmp = ROOT.TMath.Exp(tmp)\n theta = 2*ROOT.TMath.ATan(tmp) # eta = -log[tan(theta/2)]\n # momentum from transverse momentum and angle\n mom = he3[0]/ROOT.TMath.Sin(theta)\n # absorption radius\n abs_radius = absCt*mom/HE_3_MASS\n # decay radius\n dec_radius = decCt*mom/HE_3_MASS\n h_abs_ct[f\"{split}_\" + key].Fill(absCt)\n h_abs_radius[f\"{split}_\" + key].Fill(abs_radius)\n h_gen_radius[f\"{split}_\" + key].Fill(dec_radius)\n h_gen_ct[f\"{split}_\" + key].Fill(decCt)\n # print('gen: ', he3[0])\n if(decCt < absCt or absCt < -0.5): # decCt < absCt\n h_rec_radius[f\"{split}_\" + key].Fill(dec_radius)\n h_rec_ct[f\"{split}_\" + key].Fill(decCt)\n h_rec_ct[\"all_\" + key].Fill(decCt)\n\n counter += 1\n\nfor key in h_rec_ct.keys():\n key_cent = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", key)\n if not outfile.GetDirectory(f\"{key_cent[0]}_{key_cent[1]}\"):\n outfile.mkdir(f\"{key_cent[0]}_{key_cent[1]}\")\n outfile.cd(f\"{key_cent[0]}_{key_cent[1]}\")\n\n # eff radius\n h_rec_radius[key].Divide(h_gen_radius[key])\n h_rec_radius[key].GetXaxis().SetTitle(\"#it{R}_{#it{abs}} (cm)\")\n h_rec_radius[key].GetYaxis().SetTitle(\"1 - #it{f}_{abs}\")\n h_rec_radius[key].Write(\"fEffRadius_\" + key)\n # eff ct\n h_rec_ct[key].Divide(h_gen_ct[key])\n h_rec_ct[key].GetXaxis().SetTitle(\"#it{c}t (cm)\")\n h_rec_ct[key].GetYaxis().SetTitle(\"1 - #it{f}_{abs}\")\n h_rec_ct[key].Write(\"fEffCt_\" + key)\n\n h_abs_ct[key].Write(\"fAbsCt_\" + key)\n h_abs_radius[key].Write(\"fAbsRadius_\" + key)\n\n\nh_rec_ct[\"matter_cent_0_90_func_BGBW\"].Divide(h_rec_ct[\"antimatter_cent_0_90_func_BGBW\"])\nh_rec_ct[\"matter_cent_0_90_func_BGBW\"].Write('matter_antimatter_ratio')\n\noutfile.Close()\n","sub_path":"common/compute_absorption_correction.py","file_name":"compute_absorption_correction.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"537080975","text":"import os\nimport re\nimport json\nimport argparse\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom CKNRM import CKNRM\n\nfrom nltk.corpus import stopwords\nsws = {}\nfor w in stopwords.words('english'):\n sws[w] = 1\nfrom krovetzstemmer import Stemmer\nstemmer = Stemmer()\n\nregex_drop_char = re.compile('[^a-z0-9\\s]+')\nregex_multi_space = re.compile('\\s+')\n\ndef raw2tok(s):\n lst = regex_multi_space.sub(' ', regex_drop_char.sub(' ', s.lower())).strip().split()\n return lst\n\ndef load_glove(glove_file):\n idx = 0\n idx2word = []\n word2idx = {}\n word2vec = {}\n\n # process unk pad\n idx2word.append('')\n word2idx[''] = idx\n word2vec[''] = np.random.normal(scale=0.6, size=(300, ))\n idx += 1\n\n idx2word.append('')\n word2idx[''] = idx\n word2vec[''] = np.random.normal(scale=0.6, size=(300, ))\n idx += 1\n\n with open(glove_file, 'r') as f:\n for line in f:\n val = line.split()\n idx2word.append(val[0])\n word2idx[val[0]] = idx\n word2vec[val[0]] = np.asarray(val[1:], dtype='float32')\n idx += 1\n\n return idx2word, word2idx, word2vec\n\ndef create_embeddings(idx2word, word2vec):\n embedding_matrix = np.zeros((len(idx2word), 300))\n for idx, word in enumerate(idx2word):\n embedding_matrix[idx] = word2vec[word]\n\n return embedding_matrix\n\nclass devFeatures(object):\n def __init__(self, query_id, doc_id, query, doc, raw_score, query_idx, doc_idx, query_len, doc_len):\n self.query_id = query_id\n self.doc_id = doc_id\n self.query = query\n self.doc = doc\n self.raw_score = raw_score\n self.query_idx = query_idx\n self.doc_idx = doc_idx\n self.query_len = query_len\n self.doc_len = doc_len\n\ndef tok2idx(toks, word2idx):\n input_ids = []\n for tok in toks:\n if tok in word2idx:\n input_ids.append(word2idx[tok])\n else:\n input_ids.append(word2idx[''])\n return input_ids\n\ndef filter_sw(toks, length):\n wordsFiltered = []\n for w in toks:\n if w not in sws:\n w = stemmer.stem(w)\n if len(wordsFiltered) >= length:\n break\n wordsFiltered.append(w)\n return wordsFiltered\n\ndef read_data_to_features(input_file, word2idx, args):\n with open(input_file, 'r') as reader:\n cnt = 0\n features = []\n for line in reader:\n cnt += 1\n s = json.loads(line)\n\n query_id = s['query_id']\n query_toks = filter_sw(raw2tok(s['query']), args.max_query_len)\n query_len = len(query_toks)\n while len(query_toks) < 3:\n query_toks.append('')\n query_idx = tok2idx(query_toks, word2idx)\n\n for rec in s['records']:\n doc_id = rec['paper_id']\n raw_score = float(rec['score'])\n doc_toks = filter_sw(raw2tok(rec['paragraph']), args.max_doc_len)\n doc_len = len(doc_toks)\n while len(doc_toks) < 3:\n doc_toks.append('')\n doc_idx = tok2idx(doc_toks, word2idx)\n\n features.append(devFeatures(\n query_id = query_id,\n doc_id = doc_id,\n query = s['query'],\n doc = rec['paragraph'],\n raw_score = raw_score,\n query_idx = query_idx,\n doc_idx = doc_idx,\n query_len = query_len,\n doc_len = doc_len))\n\n return features\n\nclass Ranker(nn.Module):\n def __init__(self, embedding_matrix, args):\n super(Ranker, self).__init__()\n em = torch.tensor(embedding_matrix, dtype=torch.float32).cuda()\n self.embedding = nn.Embedding(args.vocab_size, args.embedding_dim)\n self.embedding.weight = nn.Parameter(em)\n self.embedding.weight.requires_grad = True\n self.ranker = CKNRM(args.kernel_size, args.embedding_dim, args.cnn_kernel, args.cuda, True)\n \n def forward(self, query_idx, pos_idx, neg_idx, query_len, pos_len, neg_len, raw_score=None, is_training=True):\n query_embed = self.embedding(query_idx)\n doc_embed = self.embedding(pos_idx)\n query_mask = self.create_mask_like(query_len, query_embed)\n doc_mask = self.create_mask_like(pos_len, doc_embed)\n\n doc_scores, doc_features = self.ranker(query_embed, doc_embed, query_mask, doc_mask)#, raw_score)\n\n return doc_scores, doc_features\n \n def create_mask_like(self, lengths, like):\n mask = torch.zeros(like.size()[:2])\n for ind, _length in enumerate(lengths.data):\n mask[ind, :_length] = 1\n mask = mask.type_as(like.data)\n mask = torch.autograd.Variable(mask, requires_grad=False)\n return mask\n\ndef devDataLoader(features, batch_size):\n batches = []\n n_samples = len(features)\n idx = np.arange(n_samples)\n for start_idx in range(0, n_samples, batch_size):\n batch_idx = idx[start_idx:start_idx+batch_size]\n\n query_id = [features[i].query_id for i in batch_idx]\n doc_id = [features[i].doc_id for i in batch_idx]\n query = [features[i].query for i in batch_idx]\n doc = [features[i].doc for i in batch_idx]\n raw_score = torch.tensor([features[i].raw_score for i in batch_idx], dtype=torch.float)\n query_idx = [torch.tensor(features[i].query_idx, dtype=torch.long) for i in batch_idx]\n doc_idx = [torch.tensor(features[i].doc_idx, dtype=torch.long) for i in batch_idx]\n query_len = torch.tensor([features[i].query_len for i in batch_idx], dtype=torch.long)\n doc_len = torch.tensor([features[i].doc_len for i in batch_idx], dtype=torch.long)\n\n query_idx = nn.utils.rnn.pad_sequence(query_idx, batch_first=True)\n doc_idx = nn.utils.rnn.pad_sequence(doc_idx, batch_first=True)\n\n batch = (query_id, doc_id, query, doc, raw_score, query_idx, doc_idx, query_len, doc_len)\n batches.append(batch)\n return batches\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--embedding_path', help='embedding path (glove embedding)')\n parser.add_argument('--no_cuda', action='store_true', default=False, help='Disables CUDA training.')\n parser.add_argument('--test_file', help='test file path')\n parser.add_argument('--out_path', help='out path to save trec file')\n parser.add_argument('--pretrained_model', help='check point to load')\n parser.add_argument('--vocab_size', default=400002, type=int, help='vocab size with padding and unk words')\n parser.add_argument('--embedding_dim', default=300, type=int, help='embedding dim')\n parser.add_argument('--kernel_size', default=21, type=int, help='kernel size')\n parser.add_argument('--batch_size', default=32, type=int, help='batch size')\n parser.add_argument('--cnn_kernel', default=128, type=int, help='cnn kernel size')\n parser.add_argument('--max_query_len', default=20, type=int, help='max query length')\n parser.add_argument('--max_doc_len', default=128, type=int, help='max doc length')\n\n args = parser.parse_args()\n args.cuda = not args.no_cuda\n idx2word, word2idx, word2vec = load_glove(args.embedding_path)\n embedding_matrix = create_embeddings(idx2word, word2vec)\n\n model = Ranker(embedding_matrix, args)\n\n chpts = []\n if os.path.isfile(args.pretrained_model):\n chpts.append(args.pretrained_model)\n elif os.path.isdir(args.pretrained_model):\n for chpt in os.listdir(args.pretrained_model):\n chpts.append(os.path.join(args.pretrained_model, chpt))\n else:\n print('pretrained_model must be a path to checkpoint file or folders')\n exit()\n \n\n # test data\n test_features = read_data_to_features(args.test_file, word2idx, args)\n test_data = devDataLoader(test_features, args.batch_size)\n \n rst_dict = {}\n for chpt in chpts:\n state_dict=torch.load(chpt)\n model.load_state_dict(state_dict)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() and args.cuda else \"cpu\")\n model.to(device)\n\n # test\n for s, batch in enumerate(test_data):\n query_id = batch[0]\n doc_id = batch[1]\n query = batch[2]\n doc = batch[3]\n batch = tuple(t.to(device) for t in batch[4:])\n (raw_score, query_idx, doc_idx, query_len, doc_len) = batch\n\n with torch.no_grad():\n doc_scores, doc_features = model(query_idx, doc_idx, None, query_len, doc_len, None, raw_score, False)\n d_scores = doc_scores.detach().cpu().tolist()\n d_features = doc_features.detach().cpu().tolist()\n raw_score = raw_score.detach().cpu().tolist()\n for (q_id, d_id, q, d, r_s, d_s, d_f) in zip(query_id, doc_id, query, doc, raw_score, d_scores, d_features):\n if q_id not in rst_dict:\n rst_dict[q_id] = {}\n if d_id not in rst_dict[q_id]:\n rst_dict[q_id][d_id] = [0.0, q, d]\n rst_dict[q_id][d_id][0] += d_s\n\n with open(args.out_path, 'w') as writer:\n tmp = {\"query_id\": \"\", \"records\": []}\n for q_id, records in rst_dict.items():\n tmp[\"query_id\"] = q_id\n tmp[\"records\"] = []\n max_pool = []\n res = sorted(records.items(), key=lambda x: x[1][0], reverse=True)\n for rank, value in enumerate(res):\n tmp['query'] = value[1][1]\n if value[0] not in max_pool:\n max_pool.append(value[0])\n tmp[\"records\"].append({\"paper_id\":value[0], \"score\":value[1][0], \"paragraph\":value[1][2]})\n writer.write(json.dumps(tmp) + '\\n')\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ReInfoSelect/inference/cknrm_inference.py","file_name":"cknrm_inference.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"366380758","text":"import os\nimport sys\nimport transaction\n\nfrom pyramid.paster import (\n get_appsettings,\n setup_logging,\n )\n\nfrom pyramid.scripts.common import parse_vars\n\nfrom ..models.meta import Base\nfrom ..models import (\n get_engine,\n get_session_factory,\n get_tm_session,\n )\nfrom ..models import ecard, user\n\n\ndef usage(argv):\n cmd = os.path.basename(argv[0])\n print('usage: %s [var=value]\\n'\n '(example: \"%s development.ini\")' % (cmd, cmd))\n sys.exit(1)\n\n\ndef main(argv=sys.argv):\n if len(argv) < 2:\n usage(argv)\n config_uri = argv[1]\n options = parse_vars(argv[2:])\n setup_logging(config_uri)\n settings = get_appsettings(config_uri, options=options)\n\n engine = get_engine(settings)\n Base.metadata.create_all(engine)\n\n session_factory = get_session_factory(engine)\n\n with transaction.manager:\n dbsession = get_tm_session(session_factory, transaction.manager)\n\n frames = [{'path': '../static/img/frames/bday.png', 'deleted': 0},\n {'path': '../static/img/frames/love.png', 'deleted': 0},\n {'path': '../static/img/frames/modern.png', 'deleted': 0},\n {'path': '../static/img/frames/xmas.png', 'deleted': 0}]\n\n for frame in frames:\n db_frame = ecard.Frame()\n db_frame.path = frame['path']\n db_frame.deleted = frame['deleted']\n dbsession.add(db_frame)\n dbsession.flush()\n\n","sub_path":"cestitke/scripts/initializedb.py","file_name":"initializedb.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"222879420","text":"from django.urls import reverse\nfrom rest_framework.test import APITestCase\n\n\nclass TestSearch(APITestCase):\n\n def test_searching(self):\n \"\"\"\n Ensure we are able to search Github.\n \"\"\"\n url = reverse('github:search')\n url += '?file=js&search=js'\n request = self.client.get(url)\n self.assertTrue(request.status_code, 200)\n","sub_path":"backend/github/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"79744874","text":"\"\"\"\nWrite a function to find the longest common prefix in an array of strings.\n\nIf there is no common prefix, return the empty string \"\".\n\nExample 1:\n\nInput: [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: the inputs share no common prefix.\n\n\"\"\"\n\n\nclass Solution:\n def longestCommonPrefix(self, strs) -> str:\n result = []\n # The inverse of zip; unpacks into per-position tuples: zip(*[\"flower\", \"flow\", \"flight\"]) => [('f','f','f'),('l','l','l'),(...)]\n s = zip(*strs)\n for i in s:\n if len(set(i)) == 1:\n result += i[0]\n else:\n break\n\n return \"\".join(result)\n\n # def longestCommonPrefix(self, strs: List[str]) -> str:\n # n = len(strs)\n # if n == 0:\n # return \"\"\n # if n == 1:\n # return strs[0]\n # cnt = 0\n # flag = True\n # for i in range(len(strs[0])):\n # val = strs[0][i]\n # for s in strs[1:]:\n # if i > len(s) - 1 or s[i] != val:\n # flag = False\n # break\n # if not flag:\n # break\n # else:\n # cnt += 1\n #\n # return strs[0][:cnt]\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.longestCommonPrefix([\"flower\", \"flow\", \"flight\"]))\n","sub_path":"string/14.查找字符串数组中的最长公共前缀.py","file_name":"14.查找字符串数组中的最长公共前缀.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"621222923","text":"import pandas as pd\n\ndef make_dataframe(collection):\n\n degree = []\n job_status = []\n exp_length = []\n\n skills = []\n title = []\n description = []\n n_title = []\n i = 1\n for doc in collection:\n\n # numeric\n # dict get value use .get(key, defaultValue)\n degree.append(doc.get('features', {}).get('required_degree_code', 0))\n exp_length.append(doc.get('features', {}).get('required_avg_experience_length', 0))\n job_status.append(doc.get('features', {}).get('job_type_code', 1))\n\n # categorical\n # n_title.append(doc['doc']['normalized_title'])\n\n # text\n skills.append(doc.get('features', {}).get('skills', 'NA'))\n title.append(doc.get('features', {}).get('job_title', 'NA'))\n # description.append(doc.get('features', {}).get('job_snippet', 'NA'))\n description.append(doc['doc']['job_snippet'])\n\n skills_encode = []\n for elements in skills:\n temp = []\n for i in elements:\n temp.append(i.encode(\"utf-8\"))\n skills_encode.append(temp)\n\n description_encode = []\n for i in description:\n description_encode.append(i.encode(\"utf-8\"))\n\n d = {\n 'degree': degree,\n 'exp_length': exp_length,\n 'job_status': job_status,\n # 'years': years,\n 'skills_feature': skills_encode,\n 'title': title,\n 'description': description_encode\n }\n data = pd.DataFrame(d)\n\n return data","sub_path":"functions/extract_feature.py","file_name":"extract_feature.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"578402368","text":"from typing import Union, List\nfrom ..enums import ColumnTypes\nfrom fuze.util import json, md5\n\n\nclass Column(object):\n def __init__(self,\n name: str,\n type: ColumnTypes,\n pk: bool = False,\n fk: Union[bool, str] = False,\n fk_ref: str = None,\n unique: bool = False,\n nullable: bool = False,\n index: bool = False,\n length: int = None,\n comment: str = None,\n aliases: Union[str, List[str]] = None,\n default=None):\n\n if isinstance(fk, str):\n fk_ref = fk\n fk = True\n\n assert name, \"The column name is not specified!\"\n assert type, \"The column type is not specified!\"\n\n self.name = name\n self.type = type\n self.pk = pk\n self.fk = fk\n self.fk_ref = fk_ref\n self.fk_ref_name = fk_ref\n self.comment = comment\n self.unique = unique\n self.nullable = nullable\n self.index = index\n self.length = length\n self.length = int(self.length) if isinstance(self.length, str) and self.length.isdigit() else self.length\n self.aliases = [] if not aliases else [aliases] if isinstance(aliases, str) else aliases\n self.default = default.arg if hasattr(default, \"arg\") else default\n # self.info = info\n\n # self.flags = {\n # \"codec\": True if self.metadata.codec else False,\n # \"format\": True if self.metadata.formatter else False,\n # \"validate\": True if self.metadata.validator else False\n # }\n #\n # # if not self._default:\n # # if self.is_date:\n # # if not self.nullable:\n # # self._default = util.timestamp\n #\n # self.metadata.bind(self)\n\n def clone(self):\n cpy = self.__class__(self.name, self.type)\n for k, v in self.__dict__.items():\n if k[0] != \"_\":\n if hasattr(cpy, k):\n setattr(cpy, k, v)\n return cpy\n\n def deflate(self):\n o = {\n \"name\": self.name,\n \"type\": self.type.name,\n \"nullable\": self.nullable\n }\n if self.pk:\n o[\"pk\"] = True\n if self.fk_ref_name:\n o[\"fk\"] = self.fk_ref_name\n\n if self.unique:\n o[\"unique\"] = True\n if self.index:\n o[\"index\"] = True\n\n if self.length:\n o[\"length\"] = self.length\n\n if self.comment:\n o[\"comment\"] = self.comment\n\n if self.default:\n o[\"default\"] = str(self.default)\n\n return o\n\n def rename(self, name):\n self.name = name\n return self\n\n def peek(self):\n o = {\n \"name\": self.name\n }\n\n # if self.info:\n # if self.info.type:\n # o[\"type\"] = self.metadata.type\n # if self.info.range:\n # o[\"range\"] = self.info.range\n # if self.info.private:\n # o[\"private\"] = True\n\n values = self.enums\n if values is not None:\n if values and hasattr(values[0], \"peek\"):\n values = [v.peek() for v in values]\n o[\"values\"] = values\n o[\"type\"] = \"enum\"\n\n if \"type\" not in o:\n if self.is_num:\n o[\"type\"] = \"numeric\"\n elif self.is_bool:\n o[\"type\"] = \"boolean\"\n elif self.is_date:\n o[\"type\"] = \"date\"\n else:\n o[\"type\"] = \"string\"\n if self.length:\n o[\"range\"] = [0, self.length]\n\n if self.alias and self.alias != self.name:\n o[\"alias\"] = self.alias\n\n o[\"nullable\"] = self.nullable\n return o\n\n def objectify(self):\n o = {\"name\": self.name}\n if self.pk:\n o[\"type\"] = \"pk\"\n elif self.fk:\n o[\"type\"] = \"fk\"\n else:\n o[\"type\"] = self.type.name\n\n if self.unique:\n o[\"unique\"] = True\n if self.nullable:\n o[\"nullable\"] = True\n if self.length:\n o[\"length\"] = self.length\n if self.default:\n o[\"default\"] = self.default\n\n if self.fk_ref_name:\n o[\"relation\"] = self.fk_ref_name\n\n return o\n\n @property\n def fingerprint(self):\n if not hasattr(self, \"_fingerprint\"):\n setattr(self, \"_fingerprint\", md5(json(self.deflate())))\n return getattr(self, \"_fingerprint\")\n\n @property\n def is_int(self):\n return True if self.type == ColumnTypes.INTEGER else False\n\n @property\n def is_num(self):\n return self.type in [ColumnTypes.INTEGER, ColumnTypes.FLOAT, ColumnTypes.DOUBLE, ColumnTypes.DECIMAL]\n\n @property\n def is_str(self):\n return self.type in [ColumnTypes.CHAR, ColumnTypes.VARCHAR, ColumnTypes.TEXT]\n\n @property\n def is_bool(self):\n return True if self.type == ColumnTypes.BOOL else False\n\n def __str__(self):\n return \"Column:\" + self.name\n\n def __repr__(self):\n return self.__str__()\n\n @classmethod\n def inflate(cls, obj: dict):\n name, type = obj[\"name\"], obj[\"type\"]\n\n params = {}\n if obj.get(\"pk\", False):\n params[\"pk\"] = True\n\n if \"fk\" in obj:\n params[\"fk\"] = obj[\"fk\"]\n\n if \"length\" in obj:\n params[\"length\"] = obj[\"length\"]\n\n if \"nullable\" in obj:\n params[\"nullable\"] = obj[\"nullable\"]\n\n if \"unique\" in obj:\n params[\"unique\"] = obj[\"unique\"]\n\n if \"index\" in obj:\n params[\"index\"] = obj[\"index\"]\n\n # if obj.get(\"unique\", False):\n # params[\"unique\"] = True\n #\n # if obj.get(\"index\", False):\n # params[\"index\"] = True\n\n if \"comment\" in obj:\n params[\"comment\"] = obj[\"comment\"]\n\n if \"default\" in obj:\n params[\"default\"] = obj[\"default\"]\n\n instance = cls(name, type, **params)\n return instance","sub_path":"fuze/database/schema/column.py","file_name":"column.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"320884568","text":"import pytest\nfrom dotmailer.templates import Template\nfrom dotmailer.exceptions import ErrorTemplateInvalid\n\n\ndef test_create_valid_template(connection):\n \"\"\"\n Test to confirm that using the API we can successful create a\n template in the user's account.\n\n :param connection:\n :return:\n \"\"\"\n test_template = Template(\n name='Test',\n subject='test',\n from_name='demo@apiconnector.com',\n html_content='',\n plain_text_content='Hello, world! $UNSUB$'\n )\n test_template.create()\n\n assert test_template.id is not None, 'Template has an ID value'\n\n\n@pytest.mark.parametrize('null_parameter', ['name', 'subject', 'from_name',\n 'html_content',\n 'plain_text_content'])\ndef test_create_missing_parameter(connection, null_parameter):\n \"\"\"\n Test to confirm that if we try to submit a template via the API that the \n error response from the API creates an appropriate exception.\n\n :param connection: \n :param null_parameter: \n :return: \n \"\"\"\n test_data = dict(\n name='Test',\n subject='test',\n from_name='demo@apiconnector.com',\n html_content='',\n plain_text_content='Hello, world! $UNSUB$'\n )\n test_data[null_parameter] = None\n with pytest.raises(ErrorTemplateInvalid,\n message='Expecting invalid template exception'):\n test_template = Template(**test_data)\n test_template.create()\n\n\n@pytest.mark.notdemo\ndef test_update_valid_template(connection):\n \"\"\"\n Test to confirm that using the API we can successful create a\n template in the user's account.\n\n NOTE: You can not t\n :param connection:\n :return:\n \"\"\"\n\n # First create a template which we can later update\n template = Template(\n name='Test',\n subject='test',\n from_name='demo@apiconnector.com',\n html_content='',\n plain_text_content='Hello, world! $UNSUB$'\n )\n template.create()\n\n assert template.id is not None, 'Template has an ID value'\n template_id = template.id\n\n template.name = 'New name'\n template.update()\n\n updated_template = Template.get_by_id(template_id)\n assert updated_template.name == 'New name'\n\n\ndef test_get_all(connection):\n \"\"\"\n\n :param connection: \n :return: \n \"\"\"\n templates = Template.get_all()\n for template in templates:\n print(template)\n assert template.id is not None\n\n","sub_path":"tests/test_templates.py","file_name":"test_templates.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"25353197","text":"import json\nimport datetime\n\nfrom sqlalchemy import Column, Integer, String, TIMESTAMP\nfrom sqlalchemy.orm import relationship\n\nimport db\nimport models\nfrom utils.custom_exceptions import DBError\n\n\nclass Query(db.Base):\n __tablename__ = 'queries'\n\n id = Column('id', Integer, primary_key=True)\n user_id = Column('user_id', Integer)\n origin_lng = Column('origin_lng', String)\n origin_lat = Column('origin_lat', String)\n destination_lng = Column('destination_lng', String)\n destination_lat = Column('destination_lat', String)\n created_at = Column('created_at', TIMESTAMP)\n\n results = relationship('Result', back_populates='query')\n\n @staticmethod\n def save_query_and_results(db_session, _query, _results):\n query = Query(\n user_id=1,\n origin_lng=_query['begin']['lng'],\n origin_lat=_query['begin']['lat'],\n destination_lng=_query['destination']['lng'],\n destination_lat=_query['destination']['lat'],\n created_at=datetime.datetime.now()\n )\n\n results = []\n for _result in _results:\n results.append(models.Result(\n cost=_result['cost'],\n way=_result['way'],\n duration=_result['duration'],\n distance=_result['distance'],\n walking_distance=_result['walking_distance'],\n steps=json.dumps(_result['steps']),\n extra=_result['extra'],\n created_at=datetime.datetime.now()\n ))\n db_session.add(query)\n query.results = results\n\n try:\n db_session.commit()\n except Exception as e:\n raise DBError(e)\n","sub_path":"models/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"309957922","text":"import sqlite3 as s\n\nconn = s.connect('/Users/gaegul/PycharmProjects/pythonProject/test.db')\nc = conn.cursor()\n\nc.execute('drop table if exists MenuPan2')\nc.execute('create table MenuPan2(menuName char(50), price int)')\nc.execute('insert into menupan2 values(\"김밥\", 3000) ')\nc.execute('insert into menupan2 values(\"순두부\", 6000) ')\nc.execute('insert into menupan2 values(\"육개장\", 7500) ')\nc.execute('insert into menupan2 values(\"칼국수\", 5500) ')\nc.execute('insert into menupan2 values(\"자짱밥\", 5500) ')\nc.execute('insert into menupan2 values(\"김치찌게\", 8000) ')\n\n\nwhile True:\n su = int(input('조회 : 0, 저장 : 1, 가격변경 : 2, 상품명변경 : 3, 삭제 : 4, 종료 : 5 >'))\n\n if su == 0:\n c.execute('select * from MenuPan2')\n elif su == 1:\n foodName = input('food name : ')\n foodPrice = int(input('food price : '))\n c.execute('insert into MenuPan2 values(?,?)', [foodName, foodPrice])\n conn.commit()\n elif su == 2:\n foodName = input('food name : ')\n changeFoodPrice = int(input('change food price : '))\n c.execute('update MenuPan2 set price =? where menuName = ?', [changeFoodPrice, foodName])\n conn.commit()\n elif su == 3:\n foodName = input('food name : ')\n changeFoodName = input('change food name : ')\n c.execute('update MenuPan2 set menuName =? where menuName = ?', [changeFoodName, foodName])\n conn.commit()\n elif su == 4:\n foodName = input('delete food name : ')\n c.execute('delete from MenuPan2 where menuName = ?', [foodName])\n conn.commit()\n elif su == 5:\n print('종료합니다.')\n break\n\n c.execute('select * from MenuPan2')\n rows = c.fetchall()\n for row in rows:\n print('{} {}'.format(row[0], row[1]))\n\nc.close()\nconn.close()","sub_path":"SQLlite3/data4.py","file_name":"data4.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"376987035","text":"import urllib.request\nfrom urllib.error import URLError\nfrom bs4 import BeautifulSoup\nimport re\nimport io\nimport sys\n\nclass weather:\n\n def __init__(self,url):\n self.url = url\n self.user_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0'\n self.headers = {'User-Agent' : self.user_agent}\n\n def getPage(self):\n try:\n request = urllib.request.Request(self.url, headers=self.headers)\n response = urllib.request.urlopen(request)\n return response.read().decode('utf-8')\n except URLError as e:\n if hasattr(e, \"reason\"):\n print('链接失败',e.reason)\n return None\n\n def getContent(self):\n page = self.getPage()\n soup = BeautifulSoup(page, \"html5lib\")\n return(soup.select('ul.t'))\n\n","sub_path":"smtp/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"92926380","text":"__author__ = 'Benji'\n\n# The number 3979 has an interesting property. Being prime itself,\n# it is possible to continuously remove digits from left to right,\n# and remain prime at each stage: 3797, 797, 97, 7. Similarly we can\n# work from right to left: 3797, 379, 37 and 3\n#\n# Find the sum of the only eleven primes that are both truncatable\n# from left to right and right to left.\n#\n# 2, 3, 5 and 7 are not considered to be truncatable primes.\n\n\nfrom useful.primes import is_prime, next_prime\n\n\ndef solve():\n trunc_primes = set()\n\n # Generate primes on the go, as no ceiling is given\n for prime in next_prime():\n # 2, 3, 5 and 7 are not considered to be truncatable primes.\n if prime in (2, 3, 5, 7):\n continue\n\n if is_truncated_prime(prime):\n trunc_primes.add(prime)\n\n if len(trunc_primes) == 11:\n return sum(trunc_primes)\n\n\ndef is_truncated_prime(n):\n str_n = str(n)\n for i, _ in enumerate(str_n):\n try:\n if not (is_prime(int(str_n[i:])) and is_prime(int(str_n[:i]))):\n return False\n\n except ValueError:\n continue\n\n return True\n\n\n\n","sub_path":"037/truncatable_primes.py","file_name":"truncatable_primes.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"641520192","text":"# Cleans CSV and returns first column as 3D list of list of lists (tweets, sentences, words)\nimport csv\nimport re\nimport numpy as np\nfrom mtclasses import replace_all, filterblanks\n\n# CSV -> List of [[Text, Date... etc],[Text, ...]]\ndef tweetparse(file_name):\n\twith open(file_name, 'r') as f:\n\t\treadcsv = csv.reader(f)\n\t\talldata = list(readcsv)[1:]\n\t\n\ttweets = []\n\n\t# allowable characters list\n\tchrallow = list(range(97,123))\n\tchrallow.extend(range(48,58))\n\tchrallow.extend([32,33,38,39,45,46,47,43,58,63])\n\n\tseparators = ['.','?','!']\n\td = {':':'','.':'','&':'and','-':'','!':''}\n\n\t# obtain list of words in tweets\n\tfor row in alldata:\n\t\ttweetxt = row[0].lower()\n\t\ttweetxt = ''.join([char if ord(char) in chrallow else ' ' for char in tweetxt])\n\t\ttweetxt = list(tweetxt.split(' '))\n\t\twordlist = []\n\t\tfor word in tweetxt: # hyperlink control and sentence encoding\n\t\t\tif '/' in word and '.' in word:\n\t\t\t\tif 'http' in word:\n\t\t\t\t\tword = word[0:word.find('http')]\n\t\t\t\telif 'pic.' in word:\n\t\t\t\t\tword = word[0:word.find('pic.')]\n\t\t\telif len(word) > 1 and word[-1] in separators:\n\t\t\t\tword = word[:-1] + '*'\n\t\t\twordlist.append(replace_all(word,d))\n\t\twordlist = filterblanks(wordlist)\n\t\tsentences = list(' '.join(wordlist).split('*'))\n\t\tsentences = filterblanks(sentences)\n\t\tsenwords = []\n\n\t\tfor o in sentences:\n\t\t\twords2 = list(o.split(' '))\n\t\t\twords2 = filterblanks(words2)\n\t\t\tsenwords.append(words2)\n\t\tif senwords != []:\n\t\t\ttweets.append(np.concatenate(senwords))\n\n\treturn tweets","sub_path":"old/cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"445680019","text":"from rest_framework import serializers\nfrom taxa.models import Taxon, Info, CommonName, GeneralDistribution, Description, PointDistribution\nfrom biblio.serializers import ReferenceDOISerializer\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer\n\nclass ChildrenInfoField(serializers.RelatedField):\n \"\"\"Used to return count, primary key, name and rank for all child nodes of a taxon, rather than just their pk\"\"\"\n def to_representation(self, value):\n child_count = Taxon.objects.get(id=value.id).get_children().count()\n return {'count': child_count, 'id': value.id, 'name': value.name, 'rank': value.rank.id, 'parent_id': value.id }\n\n\nclass ChildCountField(serializers.RelatedField):\n \"\"\"Used to return count, primary key, name and rank for all child nodes of a taxon, rather than just their pk\"\"\"\n def to_representation(self, value):\n child_count = Taxon.objects.get(id=value).get_children().count()\n return child_count\n\n\nclass StringAndKeyField(serializers.RelatedField):\n \"\"\"Used by jstree to depict rank as well as use rank id in CSS\"\"\"\n def to_representation(self, value):\n return {'id': value.id, 'name': value.name}\n\n\nclass TaxonBasicSerializerWithRank(serializers.ModelSerializer):\n parent = serializers.PrimaryKeyRelatedField(read_only=True)\n rank = StringAndKeyField(read_only=True)\n child_count = ChildCountField(read_only=True, source='id')\n get_latest_assessment = serializers.StringRelatedField(read_only=True)\n\n class Meta:\n model = Taxon\n fields = ('id', 'name', 'get_full_name', 'parent', 'rank', 'child_count', 'get_top_common_name', 'get_latest_assessment')\n\n\nclass TaxonChildrenSerializer(serializers.ModelSerializer):\n children = ChildrenInfoField(required=False, many=True, read_only=True)\n rank = serializers.PrimaryKeyRelatedField(read_only=True)\n\n class Meta:\n model = Taxon\n fields = ('id', 'name', 'rank', 'children')\n\n\nclass CommonNameSerializer(serializers.ModelSerializer):\n reference = ReferenceDOISerializer()\n language = serializers.StringRelatedField()\n\n class Meta:\n model = CommonName\n fields = ('name', 'language', 'reference')\n\n\nclass ArrayChoiceFieldSerializer(serializers.ListSerializer):\n def to_representation(self, value):\n return self.child.choices[value]\n\n\nclass InfoSerializer(serializers.ModelSerializer):\n habitats = serializers.StringRelatedField(read_only=True, many=True)\n reproductive_type = ArrayChoiceFieldSerializer(read_only=True, many=True,\n child=serializers.ChoiceField(allow_blank=True, allow_null=True,\n choices=Info.REPRODUCTIVE_TYPE_CHOICES, label='Congregatory', required=False))\n congregatory = ArrayChoiceFieldSerializer(read_only=True, many=True,\n child=serializers.ChoiceField(allow_blank=True, allow_null=True,\n choices=Info.CONGREGATORY_CHOICES, label='Congregatory', required=False))\n\n class Meta:\n model = Info\n fields = ('morphology',\n 'diagnostics',\n 'trophic',\n 'movement',\n 'migration_patterns',\n 'congregatory',\n 'reproduction',\n 'reproductive_type',\n 'habitat_narrative',\n 'habitats',\n 'altitude_or_depth_range',\n 'maturity_size_female',\n 'maturity_size_male',\n 'max_size',\n 'birth_size',\n 'size_units',\n 'generational_length',\n 'generational_length_narrative',\n 'maturity_age_female',\n 'maturity_age_male',\n 'longevity',\n 'reproductive_age',\n 'gestation_time',\n 'reproductive_periodicity',\n 'average_fecundity',\n 'natural_mortality',\n 'age_units')\n\n\nclass TaxonInfoSerializer(serializers.ModelSerializer):\n info = InfoSerializer()\n\n class Meta:\n model = Taxon\n fields = ('id', 'taxonomic_notes', 'info')\n\n\nclass DistributionSerializer(GeoFeatureModelSerializer):\n residency_status = serializers.CharField(source='get_residency_status_display')\n level = serializers.CharField(source='get_level_display')\n reference = serializers.StringRelatedField()\n\n class Meta:\n model = GeneralDistribution\n geo_field = \"distribution_polygon\"\n fields = ('date', 'residency_status', 'level', 'reference', 'description')\n\n\nclass PointSerializer(GeoFeatureModelSerializer):\n collector = serializers.StringRelatedField()\n\n class Meta:\n model = PointDistribution\n geo_field = \"point\"\n fields = ('date', 'collector', 'precision_m', 'origin_code')\n\n\nclass RankSerializer(serializers.ModelSerializer):\n class Meta:\n model = Taxon\n fields = ('id', 'name')\n\n\nclass CommonNameWriteSerializer(serializers.ModelSerializer):\n # language = serializers.PrimaryKeyRelatedField(queryset=Language.objects.all())\n\n class Meta:\n model = CommonName\n fields = ('name', 'language', 'taxon')\n\n\nclass TaxonWriteSerializer(serializers.ModelSerializer):\n class Meta:\n model = Taxon\n fields = ('id', 'name', 'rank', 'parent', 'taxonomic_notes')\n\n\nclass DescriptionWriteSerializer(serializers.ModelSerializer):\n taxon = serializers.PrimaryKeyRelatedField(queryset=Taxon.objects.all())\n\n class Meta:\n model = Description\n fields = ('taxon', 'reference')\n\n\nclass InfoWriteSerializer(serializers.ModelSerializer):\n taxon = serializers.PrimaryKeyRelatedField(queryset=Taxon.objects.all())\n\n class Meta:\n model = Info\n fields = ('taxon', 'trophic', 'diagnostics', 'morphology', 'habitat_narrative', 'habitats')","sub_path":"taxa/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"30828581","text":"import matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import choice\nimport time\nimport csv\n\ndef BubbleSort(lista):\n n = len(lista)\n for i in range(n):\n for j in range(0, n-i-1):\n if lista[j] > lista[j+1] :\n lista[j], lista[j+1] = lista[j+1], lista[j]\n\ndef InsertSort(A):\n for i in range(len(A)): \n min_idx = i \n for j in range(i+1, len(A)): \n if A[min_idx] > A[j]: \n min_idx = j \n A[i], A[min_idx] = A[min_idx], A[i]\n \ndef CountSort(lista):\n ma=max(lista)\n mi=min(lista)\n rango=ma-mi+1\n count=[0 for i in range(rango)]\n output =[0 for i in range(len(lista))]\n for i in lista: \n count[i-mi] += 1\n for i in range(1,rango): \n count[i] += count[i-1] \n for i in range(len(lista)-1,-1,-1): \n output[count[lista[i]-mi]-1] = lista[i] \n count[lista[i]-mi] -= 1\n for i in range(len(lista)): \n lista[i] = output[i] \n # return ans\n\n\ndef heapify(lista, n, i): \n largest = i \n l = 2 * i + 1 \n r = 2 * i + 2 \n if l < n and lista[i] < lista[l]: \n largest = l \n if r < n and lista[largest] < lista[r]: \n largest = r \n if largest != i: \n lista[i],lista[largest] = lista[largest],lista[i] \n heapify(lista, n, largest) \ndef HeapSort(lista): \n n = len(lista) \n for i in range(n, -1, -1): \n heapify(lista, n, i) \n for i in range(n-1, 0, -1): \n lista[i], lista[0] = lista[0], lista[i] \n heapify(lista, i, 0)\ndef MergeSort(lista): \n if len(lista) >1: \n mid = len(lista)//2 \n L = lista[:mid] \n R = lista[mid:] \n MergeSort(L) \n MergeSort(R) \n i = j = k = 0\n while i < len(L) and j < len(R): \n if L[i] < R[j]: \n lista[k] = L[i] \n i+=1\n else: \n lista[k] = R[j] \n j+=1\n k+=1\n while i < len(L): \n lista[k] = L[i] \n i+=1\n k+=1 \n while j < len(R): \n lista[k] = R[j] \n j+=1\n k+=1\n\ndef partition(lista,low,high): \n i = ( low-1 ) \n pivot = lista[high] \n for j in range(low , high): \n if lista[j] < pivot: \n i = i+1 \n lista[i],lista[j] = lista[j],lista[i] \n lista[i+1],lista[high] = lista[high],lista[i+1] \n return ( i+1 )\n\ndef QuickSort(lista,low=0,high=-100):\n if(high==-100):\n high=len(lista)-1\n if low < high: \n pi = partition(lista,low,high) \n QuickSort(lista, low, pi-1) \n QuickSort(lista, pi+1, high)\ndef SelectionSort(lista):\n for i in range(len(lista)): \n min_idx = i \n for j in range(i+1, len(lista)): \n if lista[min_idx] > lista[j]: \n min_idx = j \n lista[i], lista[min_idx] = lista[min_idx], lista[i]\ndef tiempo(f,v):\n '''\n con=0\n for i in range(2):\n start = time.time()\n f( v )\n s=(time.time() - start) *1000\n con=con+s\n return con/2\n '''\n start = time.time()\n f( v )\n return (time.time() - start) *1000\n \n \ndef generarDatos(Ns = range(10,100,10), numTrials=20, listMax = 10, name='name.csv'):\n f=open(name,\"w\")\n for n in Ns:\n lst = [ choice(range(listMax)) for i in range(n) ]\n f.write(str(n)+\";\")\n f.write(str(tiempo(QuickSort,lst))+\";\")\n f.write(str(tiempo(BubbleSort,lst))+\";\")\n f.write(str(tiempo(CountSort,lst))+\";\")\n f.write(str(tiempo(HeapSort,lst))+\";\")\n f.write(str(tiempo(InsertSort,lst))+\";\")\n f.write(str(tiempo(MergeSort,lst))+\";\")\n f.write(str(tiempo(SelectionSort,lst))+\";\\n\")\n \ndef cargar(name):\n\n L=[\"quickSort\",\"bubbleSort\",\"countSort\",\"heapSort\",\"insertionSort\",\"mergeSort\",\"selectionSort\"]\n data=np.loadtxt(name,dtype=np.str,delimiter=';',unpack=True)\n x=data[0,:]\n for i in range(1,8):\n print(i)\n y=data[i,:]\n plt.plot(x,y,marker='.',label=L[i-1])\n #print(y)\n plt.legend()\n if name== \"cplus.csv\":\n \tplt.title(\"C++\")\n if name==\"python.csv\":\n \tplt.title(\"Python\")\n \t\n plt.show()\n\ndef algoritmo(name):\n L=[\"quickSort\",\"bubbleSort\",\"countSort\",\"heapSort\",\"insertionSort\",\"mergeSort\",\"selectionSort\"]\n for i in range(len(L)):\n \tif L[i]==name:\n \t\tpos=i\n\n data=np.loadtxt('cplus.csv',dtype=np.str,delimiter=';',unpack=True)\n x=data[0,:]\n y=data[1,:]\n plt.plot(x, y, \"-.\", color=\"red\", label=\"C++\")\n\n data=np.loadtxt('python.csv',dtype=np.str,delimiter=';',unpack=True)\n x=data[0,:]\n y=data[1,:]\n plt.plot(x, y, \"-.\", color=\"green\", label=\"Python\")\n plt.legend()\n plt.title(name)\n plt.show()\n\n \n\n\n \n \n\"\"\"\nlista = [64, 34, 25, 12, 22, 11, 90]\n#bubbleSort(lista)\n#countSort(lista)\n#heapSort(lista)\n#insertionSort(lista)\n#mergeSort(lista)\nquickSort(lista)\n#selectionSort(lista)\nprint (\"Sorted listaay is:\")\nfor i in range(len(lista)):\n print (\"%d\" %lista[i])\n\n\n\nnValuesMerge, tValuesMerge = trySelectABunch(quickSort, Ns=nVals, numTrials=1, listMax = 1000)\nplt.plot(nValuesMerge, tValuesMerge, \"-.\", color=\"pink\", label=\"quickSort\")\n\nnValuesMerge, tValuesMerge = trySelectABunch(bubbleSort, Ns=nVals, numTrials=1, listMax = 1000)\nplt.plot(nValuesMerge, tValuesMerge, \"-.\", color=\"red\", label=\"bubbleSort\")\nnValuesMerge, tValuesMerge = trySelectABunch(countSort, Ns=nVals, numTrials=1, listMax = 1000)\nplt.plot(nValuesMerge, tValuesMerge, \"-.\", color=\"green\", label=\"countSort\")\nnValuesMerge, tValuesMerge = trySelectABunch(heapSort, Ns=nVals, numTrials=1, listMax = 1000)\nplt.plot(nValuesMerge, tValuesMerge, \"-.\", color=\"yellow\", label=\"heapSort\")\nnValuesMerge, tValuesMerge = trySelectABunch(insertionSort, Ns=nVals, numTrials=1, listMax = 1000)\nplt.plot(nValuesMerge, tValuesMerge, \"-.\", color=\"black\", label=\"insertionSort\")\nnValuesMerge, tValuesMerge = trySelectABunch(mergeSort, Ns=nVals, numTrials=1, listMax = 1000)\nplt.plot(nValuesMerge, tValuesMerge, \"-.\", color=\"gray\", label=\"mergeSort\")\n\nnValuesMerge, tValuesMerge = trySelectABunch(selectionSort, Ns=nVals, numTrials=1, listMax = 1000)\nplt.plot(nValuesMerge, tValuesMerge, \"-.\", color=\"blue\", label=\"selectionSort\")\nplt.xlabel(\"n\")\nplt.ylabel(\"Time(ms)\")\nplt.legend()\nplt.title(\"Seleccion\")\n\nplt.show()\nplt.savefig(\"1.png\")\n\"\"\"\n\n#nVals = list( range(1000,11000,1000))\n#generarDatos(Ns=nVals, numTrials=1, listMax = 1000,name='python.csv')\n \ncargar('python.csv')\n#cargar('cplus.csv')\n#algoritmo('QuickSort')\n","sub_path":"graficas/python/algoritmos de ordenamiento.py","file_name":"algoritmos de ordenamiento.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"60881900","text":"class logicFunctions:\n def __init__(self, firstVariable, secondVariable):\n self.firstString = firstVariable\n self.secondString = secondVariable\n self.__num = 0\n self.__result = \"\"\n self.__type = \"\"\n\n def __equalLen(self):\n self.__num = max(self.firstString.__len__(), self.secondString.__len__())\n\n if (self.firstString.__len__() < self.__num):\n for i in range(self.__num - self.firstString.__len__()):\n self.firstString = \"0\" + self.firstString\n\n if (self.secondString.__len__() < self.__num):\n for i in range(self.__num - self.secondString.__len__()):\n self.secondString = \"0\" + self.secondString\n\n def andFunction(self):\n self.__type = \"AND\"\n self.__equalLen()\n\n for i in range(self.__num):\n if(int(self.firstString[i]) and int(self.secondString[i])):\n self.__result+=\"1\"\n else:\n self.__result+=\"0\"\n self.__showResult()\n\n def __showResult(self):\n print(self.firstString)\n print(self.secondString)\n print(self.__num*\"-\"+self.__type)\n print(self.__result)\n\n\nlog = logicFunctions(\"101\",\"111\")\nlog.andFunction()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"631803643","text":"import json\nimport math\n\nimport cv\n\nfrom core import PDU\nfrom lib.dashboard_cache import DashboardCache\nfrom lib.log import setup_logging\n\n\nclass RoomPosition(PDU):\n ''' PDU that reads skeleton messages from kinect and translates the position \n of the torso (from kinect coordinates) to room coordinates using the\n sensor position value that is sent by kinect.\n \n INPUT: \n Message from kinect of skeleton type with sensor_position field\n The sensor_position needs the following fields:\n - X,Y,Z: the position of the sensor wrt the room\n - alpha: the rotation around y axis\n - beta: the rotation around x axis\n - gamma: the rotation around z axis\n The message needs a skeleton_3D key with the positions of the torso\n \n OUTPUT:\n Sends a message with a subject_position key set to a dictionary with \n X,Y,Z in room coordinates. The message is sent to the subject-position\n queue\n '''\n \n QUEUE = 'room-position'\n\n def __init__(self, **kwargs):\n super(RoomPosition, self).__init__(**kwargs)\n self.dashboard_cache = DashboardCache()\n\n def process_message(self, message):\n sensor_position = message['sensor_position']\n \"\"\" We are doing rotation using the euler angles\n see http://en.wikipedia.org/wiki/Rotation_matrix\n \"\"\"\n alpha = sensor_position['alpha']\n beta = sensor_position['beta']\n gamma = sensor_position['gamma']\n \n rx = rot_x(beta)\n ry = rot_y(alpha)\n rz = rot_z(gamma)\n \n # some temp variables manipulation follows as the cv library uses\n # output parameters on multiplications'''\n \n temp_mat = cv.CreateMat(3,3, cv.CV_64F)\n rot_mat = cv.CreateMat(3,3, cv.CV_64F)\n \n cv.MatMul(ry,rx,temp_mat)\n cv.MatMul(temp_mat, rz, rot_mat)\n \n pos = cv.CreateMat(3,1, cv.CV_64F)\n temp_pos = cv.CreateMat(3,1, cv.CV_64F)\n \n torso_pos = message['skeleton_3D']['torso']\n pos[0,0] = torso_pos['X']\n pos[1,0] = torso_pos['Y']\n pos[2,0] = torso_pos['Z']\n \n cv.MatMul(rot_mat, pos, temp_pos)\n \n pos[0,0] = temp_pos[0,0] + sensor_position['X']\n pos[1,0] = temp_pos[1,0] + sensor_position['Y']\n pos[2,0] = temp_pos[2,0] + sensor_position['Z']\n \n position_message = {\n 'type': 'subject_position',\n 'sensor_id': message['sensor_id'],\n 'created_at': message['created_at'],\n 'X': pos[0,0],\n 'Y': pos[1,0],\n 'Z': pos[2,0],\n }\n \n # Send subject position to Redis\n self.log('Found position %s' % position_message)\n self.dashboard_cache.lpush(sensor_id=message['sensor_id'],\n sensor_type=message['sensor_type'],\n measurement_type=position_message['type'],\n measurement=json.dumps(position_message))\n return None\n\ndef rot_x(phi):\n mat = cv.CreateMat(3,3, cv.CV_64F)\n cv.Set(mat, 0.0)\n mat[0,0] = 1\n mat[1,1] = math.cos(phi)\n mat[1,2] = - math.sin(phi)\n mat[2,1] = math.sin(phi)\n mat[2,2] = math.cos(phi)\n return mat\n\ndef rot_y(phi):\n mat = cv.CreateMat(3,3, cv.CV_64F)\n cv.Set(mat, 0.0) \n mat[0,0] = math.cos(phi)\n mat[0,2] = math.sin(phi)\n mat[1,1] = 1\n mat[2,0] = - math.sin(phi)\n mat[2,2] = math.cos(phi)\n return mat\n\ndef rot_z(phi):\n mat = cv.CreateMat(3,3, cv.CV_64F)\n cv.Set(mat, 0.0) \n mat[0,0] = math.cos(phi)\n mat[0,1] = - math.sin(phi) \n mat[1,0] = math.sin(phi)\n mat[1,1] = math.cos(phi) \n mat[2,2] = 1\n \n return mat\n\nif __name__ == \"__main__\":\n setup_logging()\n module = RoomPosition()\n module.run()\n","sub_path":"pipeline/room_position.py","file_name":"room_position.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"465113675","text":"# encoding:utf-8\r\nimport csv\r\nimport json\r\nimport os\r\nimport logging\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nfrom settings import settings\r\n\r\nRUTA_ARCHIVOS = settings['carpeta_bd_fingerprinting']\r\nNUMERO_DE_VECINOS = 1\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass ManejadorDatosFingerPrinting(object):\r\n\r\n    def __init__(self):\r\n        self.datos_fingerprinting = {}\r\n        self.bd_fingerprinting = []\r\n        self.cargar_datos()\r\n\r\n    def cargar_datos(self):\r\n        if not self.bd_fingerprinting:\r\n            self._lee_catalogo()\r\n        for archivo in self.bd_fingerprinting:\r\n            datos = []\r\n            puntos = []\r\n            try:\r\n                with open(os.path.join(RUTA_ARCHIVOS, archivo + \".csv\"), 'r') as csv_file:\r\n                    lines = csv.reader(csv_file)\r\n                    dataset = list(lines)\r\n                    for x in range(len(dataset)):\r\n                        try:\r\n                            datos.append([float(dataset[x][0]), float(dataset[x][1]), float(dataset[x][2])])\r\n                            puntos.append(dataset[x][3])\r\n                        except:\r\n                            logger.debug(\"ERROR cargando datos fingerprinting. La linea %d del fichero %s esta corrupta\" % (x, archivo))\r\n                neigh = KNeighborsClassifier(n_neighbors=NUMERO_DE_VECINOS)\r\n                neigh.fit(datos, puntos)\r\n                self.datos_fingerprinting[archivo] = neigh\r\n            except:\r\n                logger.debug(\"ERROR cargando datos fingerprinting. El fichero %s esta corrupto\" % archivo)\r\n\r\n    def _lee_catalogo(self):\r\n        try:\r\n            with open(os.path.join(RUTA_ARCHIVOS, \"catalogo.json\")) as json_file:\r\n                self.bd_fingerprinting = json.load(json_file)['nombre_archivos']\r\n        except Exception:\r\n            self.bd_fingerprinting = []\r\n\r\n    def _actualizar_catalogo(self, nombre_fichero):\r\n        with open(os.path.join(RUTA_ARCHIVOS, \"catalogo.json\"), 'w') as catalogo:\r\n            self.bd_fingerprinting.append(nombre_fichero)\r\n            catalogo.write(\"{}\".format(json.dumps({\"nombre_archivos\": self.bd_fingerprinting})))\r\n\r\n    def leer_fichero(self, nombre_fichero):\r\n        try:\r\n            with open(os.path.join(RUTA_ARCHIVOS, nombre_fichero + \".csv\"), 'r') as csv_file:\r\n                lineas_fichero = []\r\n                for linea in csv_file:\r\n                    lineas_fichero.append(linea.replace(\"\\n\", \"\"))\r\n                return lineas_fichero\r\n        except Exception:\r\n            return None\r\n\r\n    def crear_fichero(self, nombre_fichero):\r\n        fichero = open(os.path.join(RUTA_ARCHIVOS, nombre_fichero + \".csv\"), \"w\")\r\n        fichero.close()\r\n\r\n    def eliminar_lineas_duplicadas(self, archivo, lineas_fichero, manejador_correciones):\r\n        lineas_nuevas = []\r\n        reescribir_fichero = False\r\n        for linea_correctora in manejador_correciones.lineas_correctoras[archivo]:\r\n            es_nueva= True\r\n            contador_lineas_fichero = 0\r\n            for linea_fichero in lineas_fichero:\r\n                if linea_correctora[:-5] == linea_fichero[:-5]:\r\n                    es_nueva = False\r\n                    if linea_fichero != linea_correctora:\r\n                        manejador_correciones.lineas_reescritas.append({linea_correctora: linea_fichero})\r\n                        reescribir_fichero = True\r\n                        lineas_fichero[contador_lineas_fichero] = linea_correctora\r\n                    else:\r\n                        manejador_correciones.numero_correciones -= 1\r\n                        for i in range(len(manejador_correciones.log_correcciones)):\r\n                            if linea_correctora == manejador_correciones.log_correcciones[i]['linea_correctora']:\r\n                                del manejador_correciones.log_correcciones[i]\r\n                                break\r\n                    break\r\n                contador_lineas_fichero += 1\r\n            if es_nueva:\r\n                lineas_nuevas.append(linea_correctora)\r\n\r\n        if reescribir_fichero:\r\n            for linea_nueva in lineas_nuevas:\r\n                lineas_fichero.append(linea_nueva)\r\n            return lineas_fichero, reescribir_fichero\r\n        return lineas_nuevas, reescribir_fichero\r\n\r\n    def insertar_lineas_nuevas(self, lineas_nuevas, archivo, archivo_nuevo=False):\r\n        with open(os.path.join(RUTA_ARCHIVOS, archivo + \".csv\"), 'a') as csv_file:\r\n            numero_de_lineas = len(lineas_nuevas)\r\n            primera_linea = True if not archivo_nuevo else False\r\n            for linea in lineas_nuevas:\r\n                numero_de_lineas -= 1\r\n                if primera_linea:\r\n                    primera_linea = False\r\n                    linea = '\\n' + linea\r\n                if numero_de_lineas > 0:\r\n                    csv_file.write(linea + '\\n')\r\n                else:\r\n                    csv_file.write(linea)\r\n\r\n    def reescribir_fichero(self, lineas_fichero, archivo):\r\n        with open(os.path.join(RUTA_ARCHIVOS, archivo + \".csv\"), 'w') as csv_file:\r\n            numero_de_lineas = len(lineas_fichero)\r\n            for linea in lineas_fichero:\r\n                numero_de_lineas -= 1\r\n                if numero_de_lineas > 0:\r\n                    csv_file.write(linea + '\\n')\r\n                else:\r\n                    csv_file.write(linea)\r\n\r\n    def actualizar_bd_fingerprinting(self, manejador_correciones):\r\n        for archivo in manejador_correciones.lineas_correctoras:\r\n            lineas_fichero = self.leer_fichero(archivo)\r\n            archivo_nuevo = False\r\n            if lineas_fichero is None:\r\n                lineas_fichero = []\r\n                self.crear_fichero(archivo)\r\n                self._actualizar_catalogo(archivo)\r\n                archivo_nuevo = True\r\n\r\n            lineas_nuevas, reescribir_fichero = self.eliminar_lineas_duplicadas(archivo, lineas_fichero, manejador_correciones)\r\n\r\n            if reescribir_fichero:\r\n                self.reescribir_fichero(lineas_nuevas, archivo)\r\n            elif lineas_nuevas:\r\n                self.insertar_lineas_nuevas(lineas_nuevas, archivo, archivo_nuevo)\r\n\r\nclass ManejadorDatosFingerPrintingTest(ManejadorDatosFingerPrinting):\r\n\r\n    def __init__(self):\r\n        super(ManejadorDatosFingerPrintingTest, self).__init__()\r\n        self.datos_fingerprinting = {}\r\n        self.bd_fingerprinting = [\"1001,1002,1003\"]\r\n        self.cargar_datos()\r\n\r\n    def cargar_datos(self):\r\n        from .test.datos_pruebas import cargar_datos_fingerprinting\r\n        dataset = cargar_datos_fingerprinting()\r\n        datos = []\r\n        puntos = []\r\n        for x in range(len(dataset)):\r\n            datos.append([float(dataset[x][0]), float(dataset[x][1]), float(dataset[x][2])])\r\n            puntos.append(dataset[x][3])\r\n        neigh = KNeighborsClassifier(n_neighbors=NUMERO_DE_VECINOS)\r\n        neigh.fit(datos, puntos)\r\n        self.datos_fingerprinting[\"1001,1002,1003\"] = neigh\r\n\r\n    def actualizar_bd_fingerprinting(self, manejador_correciones):\r\n        assert manejador_correciones.lineas_correctoras['1002,1003,1005'][0] == '-41, -38, -40,1002'\r\n        assert manejador_correciones.lineas_correctoras['1002,1003,1005'][1] == '-63, -66, -53,1002'\r\n        assert manejador_correciones.lineas_correctoras['1002,1003,1005'][2] == '-45, -54, -55,1001'\r\n        assert manejador_correciones.lineas_correctoras['1002,1003,1005'][3] == '-43, -55, -60,1001'\r\n        assert manejador_correciones.lineas_correctoras['1002,1003,1005'][4] == '-40, -54, -59,1005'\r\n\r\n        lineas_nuevas, reescribir_fichero = self.eliminar_lineas_duplicadas('1002,1003,1005', ['-63, -66, -53,1001'], manejador_correciones)\r\n        assert reescribir_fichero\r\n        assert len(lineas_nuevas) == 5","sub_path":"fingerprinting/manejador_datos_fingerprinting.py","file_name":"manejador_datos_fingerprinting.py","file_ext":"py","file_size_in_byte":7784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"485483116","text":"from typing import List\ndef old_depthFirstSearch(graph:List[List[int]]):\n    counter = 0\n    result = [-1]*len(graph)\n    visited= [False] * len(graph)\n\n    def explore(i:int):\n        visited[i]=True\n        result[i]= counter\n\n        for neighborIndex in graph[i]:\n            if not visited[neighborIndex]:\n                explore(neighborIndex)\n    \n    for i in range(len(graph)):\n        if not visited[i]:\n            explore(i)\n            counter+=1\n\n    return result\n\n\n\nclass SearchNode(object):\n    def __init__(self):\n        self.connectedComponentID = None\n        self.preClockIndex = None\n        self.postClockIndex = None\n        self.isVisited = False\n\n\n#return list of search nodes in topological order\n#where each searchnode has id, pre and post clock time and connected component id\ndef depthFirstSearch(graph:List[List[int]]) -> List[SearchNode]:\n    counter = 0\n    clock = 0\n    nodes = list(SearchNode() for _ in range(len(graph)))\n    result = []\n\n    def explore(i:int):\n        nonlocal clock\n        nodes[i].isVisited=True\n        nodes[i].connectedComponentID= counter\n        nodes[i].preClockIndex = clock\n        clock += 1\n\n        for neighborIndex in graph[i]:\n            if not nodes[neighborIndex].isVisited:\n                explore(neighborIndex)\n        nodes[i].postClockIndex = clock\n        clock += 1\n        result.append(nodes[i])\n    \n    for i in range(len(graph)):\n        if not nodes[i].isVisited:\n            explore(i)\n            counter+=1\n\n    result.reverse()\n    return result\n\n\ndef test_dfs():\n    stuff = list(n.connectedComponentID for n in depthFirstSearch(\n        [[1,2],\n        [0],\n        [0],\n        [4],\n        [3]]))\n    assert stuff==[1,1,0,0,0]\n\ndef test_preclock():\n    stuff = list(n.preClockIndex for n in depthFirstSearch(\n        [[1,2],\n        [0],\n        [0],\n        [4],\n        [3]]))\n    assert stuff == [6,7,0,3,1]","sub_path":"oldDFS.py","file_name":"oldDFS.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"529410389","text":"\"\"\"An environment to skip k frames and return a max between the last two.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass FrameskipEnv(gym.Wrapper):\n    \"\"\"An environment to skip k frames.\"\"\"\n\n    def __init__(self, env: gym.Env, skip: int=4) -> None:\n        \"\"\"\n        Initialize a new max frame skip env around an existing environment.\n\n        Args:\n            env: the environment to wrap around\n            skip: the number of frames to skip (i.e. hold an action for)\n\n        Returns:\n            None\n\n        \"\"\"\n        super().__init__(env)\n        self._skip = skip\n\n    def step(self, action: int) -> tuple:\n        \"\"\"\n        Take a step using the given action.\n\n        Args:\n            action: the discrete action to perform.\n\n        Returns:\n            a tuple of:\n            - the start as a result of the action\n            - the reward achieved by taking the action\n            - a flag denoting whether the episode has ended\n            - a dictionary of extra information\n\n        \"\"\"\n        # the total reward from `skip` frames having `action` held on them\n        total_reward = 0.0\n        done = False\n        # perform the action `skip` times\n        for _ in range(self._skip):\n            state, reward, done, info = self.env.step(action)\n            total_reward += reward\n            # break the loop if the game terminated\n            if done:\n                break\n\n        return state, total_reward, done, info\n\n    def reset(self) -> np.ndarray:\n        \"\"\"Reset the emulator and return the initial state.\"\"\"\n        return self.env.reset()\n\n\n# explicitly define the outward facing API of this module\n__all__ = [FrameskipEnv.__name__]\n","sub_path":"gym_tetris/wrappers/frameskip_env.py","file_name":"frameskip_env.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"469313100","text":"import re\nimport urllib\nimport urllib.request\n\n'''\nWrite a crawler for http://music.163.com/#/discover/artist/\n1. Crawl the list of recommended artists\n2. Crawl each recommended artist's song list\n3. Write the above to music.txt\n'''\n\n\ndef output(html):\n    print(html)\n\n\ndef discover():\n    # connect and fetch the landing page\n    url = 'http://music.163.com/discover/artist/'\n    req = urllib.request.urlopen(url)\n    context = req.read().decode('utf-8')\n\n    songer = dict()\n    artist_url = 'http://music.163.com'\n    a1 = context.find('/artist?id')\n    while a1 != -1:\n        a2 = context.find('class',a1)\n        s1 = context.find('title',a2)\n        s2 = context.find('>',s1)\n        songer[context[s1+7:s2-4]] = artist_url+context[a1:a2-2]\n        a1 = context.find('/artist?id',s2)\n\n    for key,value in songer.items():\n        print(key , value)\n\ndiscover()","sub_path":"spider/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"3931254","text":"import csv\nimport subprocess\nfrom pathlib import Path\n\nfrom effectiveness.settings import METRICS_DIR, PROJECTS_DIR\n\nJAVA_METRICS_JAR_NAME = 'java-metrics-1.0-SNAPSHOT-jar-with-dependencies.jar'\nJAVA_METRICS_JAR_PATH = METRICS_DIR / JAVA_METRICS_JAR_NAME\n\nJAVA_METRICS_COLUMNS = [\n    'Project',\n    'Package',\n    'Class',\n    'MethodSignature',\n    'OuterClass',\n    'AccessModifier',\n    'IsStatic',\n    'IsFinal',\n    'ATFD',\n    'CA',\n    'CAM',\n    'CBO',\n    'CBOMZ',\n    'CE',\n    'CYCLO',\n    'DAM',\n    'DIT',\n    'LCOM',\n    'LD',\n    'LOC_C',\n    'LOC_M',\n    'MFA',\n    'MOA',\n    'MRD',\n    'NOAM',\n    'NOC',\n    'NOL_C',\n    'NOL_M',\n    'NOM',\n    'NOMM',\n    'NOMR_C',\n    'NOMR_M',\n    'NOPA',\n    'NOPV',\n    'NPM',\n    'RFC',\n    'WMC',\n    'WMCNAMM',\n    'WOC',\n]\n\n\ndef calculate_metrics(project: str):\n    project_dir = PROJECTS_DIR / project\n\n    results_file_path = project_dir / 'results.csv'\n\n    with open(results_file_path, 'w', newline='') as results_file:\n        results_file_writer = csv.writer(\n            results_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL\n        )\n        results_file_writer.writerow(JAVA_METRICS_COLUMNS)\n        analyze_dir(project_dir / 'src/main/java', project_dir, results_file_writer)\n\n    # Merge results to java_metrics.csv\n    metrics_output = METRICS_DIR / 'java_metrics.csv'\n    if not metrics_output.is_file():\n        # create new file and write header\n        metrics_output_file = 
open(metrics_output, 'w', newline='')\n csv.writer(\n metrics_output_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL\n ).writerow(JAVA_METRICS_COLUMNS)\n else:\n # append rows to existing file\n metrics_output_file = open(metrics_output, 'a', newline='')\n\n with metrics_output_file:\n metrics_output_writer = csv.writer(\n metrics_output_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL\n )\n with open(project_dir / 'results.csv', 'rt') as f:\n data = csv.reader(f)\n next(data, None) # omit header\n for row in data:\n metrics_output_writer.writerow(row)\n\n\ndef analyze_dir(rootdir: Path, project_dir: Path, results_writer):\n for subdir in rootdir.iterdir():\n if subdir.is_dir():\n subprocess.run(\n ['java', '-jar', JAVA_METRICS_JAR_PATH, '-i', subdir],\n cwd=project_dir,\n )\n output_file = project_dir / 'output.csv'\n if output_file.is_file():\n with open(output_file, 'r') as f:\n data = csv.reader(f)\n next(data, None) # omit header\n for row in data:\n # use only rows that describe the whole class\n if row[3] == '':\n results_writer.writerow(row)\n output_file.unlink()\n analyze_dir(subdir, project_dir, results_writer)\n","sub_path":"effectiveness/metrics/java_metrics.py","file_name":"java_metrics.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"25341267","text":"# -*- coding: utf-8 -*-\r\n\r\n# pie 차트\r\n# 카테고리 별 값의 상대적인 비교를 위한 그래프 생성\r\n# pie 함수를 사용하여 생성\r\n# pie(데이터크기, 간격정보(explode), \r\n# 데이터라벨(labels), 각데이터색상(colors))\r\n# 참고 사이트\r\n# https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie\r\n\r\n# 데이터 크기\r\nsizes = [15, 30, 45, 10]\r\n# 데이터 출력 시 다른 데이터와의 간격\r\nexplode = [0.3, 0, 0, 0.1]\r\n# 데이터 라벨\r\nlabels = ['A', 'B', 'C', 'D']\r\n# 데이터를 출력할 색상\r\ncolors = ['yellow', 'gold', 'red', 'green']\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\nplt.title('Pie Chart Example')\r\nplt.pie(sizes, explode=explode, labels=labels, colors=colors)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"day_08/matplotlib/matplotlib_14.py","file_name":"matplotlib_14.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"18134451","text":"#\n# 采用Yixin Engine作为联系对手进行对局:\n#\n# @author: Eric Li\n# 与Gomocup Protoc协议兼容,通过管道Pipe与AI引擎进行通讯,协议可参考:http://petr.lastovicka.sweb.cz/protocl2en.htm\n#\n\nfrom subprocess import *\nimport time, string\nimport threading\n\n# shell=True, bufsize=10\np = Popen('C:/Program Files/Yixin/engine.exe', stdin=PIPE, stdout=PIPE)\n\n# 接收返回结果的线程:\ndef get_yixin_answer():\n global p\n while True:\n line = str(p.stdout.readline().decode(\"GBK\").strip())\n if not line: # 空则跳出\n break\n print(\">>>>>>\", line)\n if not line.startswith(\"MESS\"):\n # print(\"not MESS\")\n pass\n print(\"look up!!! 
EXIT ===\") # 跳出\n\n\n# 发送命令:\ndef send_command(command=\"\"):\n if not command.endswith(\"\\r\\n\"):\n command = command + \"\\r\\n\" # 必须要有,代表一行的命令结束。\n\n p.stdin.write(command.encode('GBK')) # 编码格式与系统有关?这里若为UTF-8,显示中文不正常。\n p.stdin.flush() # 把输入的命令强制输出\n time.sleep(1)\n return\n\n\n# 启动线程\nw = threading.Thread(target=get_yixin_answer)\nw.start()\n\n# cmd = \"START 15\\r\\n\" # 必须要有,代表一行的命令结束。\n# send_command(\"info nbestsym 2\")\nsend_command(\"START 15\")\nsend_command(\"INFO timeout_turn 1000\") # 每步思考时间:最长1秒,时间越长水平越高。\n# send_command(\"yxblock 7, 7\")\n# send_command(\"done\")\n\n# print(\"NOW begin\")\n# send_command(\"BEGIN\")\n\nsend_command(\"TURN 10, 9\")\nsend_command(\"TURN 7, 7\")\n\n# send_command(\"TAKEBACK 7, 8\")\n# send_command(\"TAKEBACK 7, 7\")\n# send_command(\"TURN 7, 7\")\n# send_command(\"TURN 8, 9\")\n\n","sub_path":"play_with_yinxin.py","file_name":"play_with_yinxin.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"398470758","text":"def integer_distance_to_median(points):\r\n points.sort()\r\n l = len(points)\r\n median = points[l/2]\r\n return sum([abs(median - x) for x in points])\r\n\r\ndef n_conversions(strings):\r\n compressed = [compress(s) for s in strings]\r\n for c in compressed[1:]:\r\n if c[0] != compressed[0][0]:\r\n return \"Fegla Won\"\r\n letterwise = [[compressed[i][1][j] for i in xrange(len(compressed))] for j in xrange(len(compressed[0][0]))]\r\n moves = sum([integer_distance_to_median(x) for x in letterwise])\r\n return moves\r\n\r\n\r\ndef compress(string):\r\n letters = [string[0]]\r\n counts = [1]\r\n for let in string[1:]:\r\n if let == letters[-1]:\r\n counts[-1] += 1\r\n else:\r\n letters.append(let)\r\n counts.append(1)\r\n return letters, counts\r\n\r\n\r\nwith open(\"repeater_small.txt\", \"rb\") as f:\r\n t = int(f.next().strip())\r\n answers = []\r\n for _ in xrange(t):\r\n n = int(f.next().strip())\r\n strings = []\r\n for __ in xrange(n):\r\n strings.append(f.next().strip())\r\n answers.append(n_conversions(strings))\r\n\r\nwith open(\"repeater_small_ans.txt\", \"wb\") as g:\r\n g.write(\"\\r\\n\".join([\"Case #%d: %s\" % (i + 1, str(answers[i])) for i in xrange(len(answers))]))\r\n\r\n","sub_path":"solutions_5751500831719424_0/Python/Bletson/repeater.py","file_name":"repeater.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"589417689","text":"import mailbox\nimport csv\nimport timeit\nimport arrow\nfrom gooey import Gooey, GooeyParser\nfrom email.header import decode_header, make_header\nimport locale\nimport re\nimport os.path\n\n\ndef clean_header(header, verbose=False):\n try:\n return (\n str(make_header(decode_header(re.sub(r\"\\s\\s+\", \" \", header))))\n if header\n else \"\"\n )\n except:\n if verbose:\n print(f\"Failed to properly decode/reencode header:\")\n print(header)\n return header\n\n\ndef process_mbox(mbox_filename, year=None, verbose=False):\n count = 0\n ignored = 0\n emails = []\n\n if verbose and year:\n print(f\"Ignoring emails not from year {year}.\")\n\n for message in mailbox.mbox(mbox_filename):\n count += 1\n\n # When downloading from Google Takeout, there are a few different datetime formats\n # So far, they've all matched one of the below options (with lots of regex for extra whitespace...)\n date_formats = [\n r\"ddd,[\\s+]D[\\s+]MMM[\\s+]YYYY[\\s+]H:mm:ss[\\s+]Z\",\n 
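# The remaining patterns below cover named timezones, missing offsets, zero-padded day/hour fields, and date headers without a weekday or comma.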
r\"ddd,[\\s+]D[\\s+]MMM[\\s+]YYYY[\\s+]H:mm:ss[\\s+]ZZZ\",\n r\"ddd,[\\s+]D[\\s+]MMM[\\s+]YYYY[\\s+]H:mm:ss[\\s+]\",\n r\"ddd,[\\s+]DD[\\s+]MMM[\\s+]YYYY[\\s+]HH:mm:ss\",\n r\"ddd[\\s+]D[\\s+]MMM[\\s+]YYYY[\\s+]H:mm:ss[\\s+]Z\",\n r\"D[\\s+]MMM[\\s+]YYYY[\\s+]HH:mm:ss[\\s+]Z\",\n ]\n\n a_date = None\n\n for d_format in date_formats:\n try:\n a_date = arrow.get(message[\"Date\"], d_format)\n break\n except:\n continue\n\n if not a_date:\n print(\n f\"ALERT: '{message['Date']}' does not match any expected format. Ignoring email with subject '{message['Subject']}'.\"\n )\n ignored += 1\n\n else:\n if year and (a_date.format(\"YYYY\") != year):\n ignored += 1\n if verbose:\n print(f\"WARNING: Invalid year found ({a_date.format('YYYY')}).\")\n\n else:\n data = [\n clean_header(message[\"Subject\"], verbose),\n clean_header(message[\"From\"], verbose),\n clean_header(message[\"To\"], verbose),\n a_date,\n ]\n\n emails.append(data)\n\n if verbose and (count % 1000 == 0):\n print(f\"INFO: {count} emails processed.\")\n\n # Sort based on arrow object\n emails = sorted(emails, key=lambda x: x[-1])\n\n # Convert list to desired string format\n date_format = \"M/D/YY\"\n emails = [[*email[:-1], email[-1].format(date_format)] for email in emails]\n\n return [emails, count, ignored]\n\n\ndef export_emails(emails, output_filename):\n with open(\n output_filename, \"w\", newline=\"\", encoding=locale.getpreferredencoding()\n ) as out_file:\n writer = csv.writer(out_file, quoting=csv.QUOTE_MINIMAL)\n writer.writerows(emails)\n\n\n@Gooey(program_name=\"CSDCO CLOG Generator\")\ndef main():\n parser = GooeyParser(\n description=\"Export data (Subject, From, To, Date) from a .mbox file to a CSV\"\n )\n parser.add_argument(\n \"mbox\",\n metavar=\".mbox file\",\n widget=\"FileChooser\",\n type=str,\n help=\"Name of mbox file\",\n )\n # parser.add_argument('-o', '--output-filename', metavar='Output filename', type=str, help='Filename for export.')\n parser.add_argument(\n \"-y\",\n \"--year\",\n metavar=\"Year\",\n type=str,\n help=\"Ignore emails not from this year.\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n metavar=\"Verbose\",\n action=\"store_true\",\n help=\"Print troubleshooting information.\",\n )\n args = parser.parse_args()\n\n start_time = timeit.default_timer()\n\n mailbox_filename = args.mbox\n # if \"output_filename\" in args and args.output_filename:\n # if not os.path.exists(args.output_filename):\n # if args.verbose:\n # print(\n # f\"Invalid path name for export given. 
Changed to {mailbox_filename.replace('.mbox', '.csv')}\"\n # )\n # output_filename = mailbox_filename.replace(\".mbox\", \".csv\")\n # else:\n # output_filename = args.output_filename\n # else:\n output_filename = mailbox_filename.replace(\".mbox\", \".csv\")\n\n # Process data\n print(f\"Beginning processing of {mailbox_filename}...\")\n emails, message_count, ignored_count = process_mbox(\n mailbox_filename, args.year, args.verbose\n )\n\n # Export data\n print(f\"Beginning export of {len(emails)} emails to {output_filename}...\")\n export_emails(emails, output_filename)\n\n print(\n f\"{message_count} emails were found and {message_count - ignored_count} were exported to {output_filename}.\"\n )\n print(f\"Completed in {round((timeit.default_timer()-start_time), 2)} seconds.\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"clog.py","file_name":"clog.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"635769526","text":"n,m = map(int,input().split())\nA = [list(input()) for i in range(n)]\nB = [list(input()) for i in range(m)]\n\nif n == m:\n print (\"Yes\") if A == B else print (\"No\")\n exit ()\n\nfor A_i in range(len(A)):\n for a_i in range(len(A[A_i])):\n if A[A_i][a_i] == B[0][0]:\n if m <= n-A_i and m <= n-a_i:\n tmp = []\n for i in range(A_i,A_i+m):\n tmp_l = []\n for j in range(a_i,a_i+m):\n tmp_l.append(A[i][j])\n tmp.append(tmp_l)\n if B == tmp:\n print (\"Yes\")\n exit ()\nprint (\"No\")\n","sub_path":"Python_codes/p03804/s797754998.py","file_name":"s797754998.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"549650017","text":"'''\nCreated on Mar 19, 2019\n\n@author: malte\n'''\n\nfrom collections import Counter\nimport gc\nfrom itertools import chain\nimport time\n\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection.univariate_selection import f_classif\n\nfrom config.globals import BASE_PATH\nfrom domain.features import FEATURES, CAT_FEATURES\nfrom evaluate import evaluate\nfrom featuregen.create_set import create_set\nfrom helper.df_ops import train_test_split, check_cols\nimport lightgbm as lgbm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom helper.loader import ensure_dir\n\nRAW = 'raw/'\nSET = 'competition/'\n\nCONF = {\n 'train_only': False,\n \n 'pop_hidden': False,\n 'path_pop': BASE_PATH + SET,\n 'min_pop': None,\n \n 'price_hidden': False,\n 'path_price': BASE_PATH + SET,\n 'min_occurences': None,\n 'fillna_mean': False,\n \n 'path_session': BASE_PATH + SET,\n 'path_poi': BASE_PATH + SET,\n \n 'path_crawl': BASE_PATH + 'crawled/',\n \n 'path_meta': BASE_PATH + 'preprocessed/',\n 'meta_latent': 'd2v',\n \n 'path_latent': BASE_PATH + 'competition/',\n}\n\n#ALGO \nLTR = True\nSHUFFLE = False\nVALID = 0.1\nSTOPPING = 500\nFS = None\nFS_IMP = None\n\n#KEYS\nDSKEY = 'dataset'\nALGKEY = 'lgbm_{}_{}_val{}-{}_{}{}_dart0.1'.format( 'ltr' if LTR else 'bin', 'shfl' if SHUFFLE else 'noshfl', VALID, STOPPING, 'FS'+str(FS)+'_' if FS is not None else '', 'FSIMP'+str(FS_IMP)+'_' if FS_IMP is not None else '' )\nSTACK = False\n\ndef main():\n \n tstart = time.time()\n \n train = create_set( base_path=BASE_PATH + SET, conf=CONF, key=DSKEY, redo=False )\n #train = resolve_na(train)\n \n print( 'loaded in {}'.format( (time.time() - tstart) ) )\n tstart = time.time()\n \n #test = train.query('train == 0')\n 
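# Keep only the training rows here; the held-out rows are re-loaded from the cached dataset ('train == 0') just before prediction.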
train.query('train == 1', inplace=True)\n \n print( 'split in {}'.format( (time.time() - tstart) ) )\n tstart = time.time()\n \n if FS_IMP is not None:\n FEATURES_IMP = get_features_by_importance(FS_IMP)\n else:\n FEATURES_IMP = FEATURES\n \n print( [item for item, count in Counter(FEATURES).items() if count > 1] )\n \n y = train[ 'label' ]\n X = train[ FEATURES_IMP + ['session_id'] ] \n \n #input(\"Press Enter to continue...\")\n \n print( 'FEATURES in in {}'.format( (time.time() - tstart) ) )\n tstart = time.time()\n \n if STACK:\n train_stack = train[['user_id','session_id','step','timestamp','impressions']].copy()\n del train\n gc.collect()\n \n print( 'gc collect in in {}'.format( (time.time() - tstart) ) )\n tstart = time.time()\n \n if FS != None:\n check_cols( X )\n keep = feature_selection(X[FEATURES_IMP], y ,FS)\n KEEP_FEATURES = [ FEATURES_IMP[i] for i in keep ]\n else:\n KEEP_FEATURES = FEATURES_IMP\n \n X_train, X_valid, y_train, y_valid = train_test_split( X, y, test_size=VALID, shuffle=SHUFFLE )\n \n print( 'split in in {}'.format( (time.time() - tstart) ) )\n tstart = time.time()\n \n if LTR:\n q_train = X_train.groupby( ['session_id'] ).size().values.astype(np.float32)\n q_valid = X_valid.groupby( ['session_id'] ).size().values.astype(np.float32)\n xtrain = X_train[KEEP_FEATURES].values.astype(np.float32)\n ytrain = y_train.values.astype(np.float32)\n del X_train, y_train\n gc.collect()\n d_train = lgbm.Dataset( xtrain, label=ytrain, group=q_train, feature_name=KEEP_FEATURES)#, categorical_feature=CAT_FEATURES )\n del q_train\n gc.collect()\n xval = X_valid[KEEP_FEATURES].values.astype(np.float32)\n yval = y_valid.values.astype(np.float32)\n del X_valid, y_valid\n gc.collect()\n d_valid = lgbm.Dataset( xval, label=yval, group=q_valid, feature_name=KEEP_FEATURES)#, categorical_feature=CAT_FEATURES )\n del q_valid\n gc.collect()\n else:\n xtrain = X_train[KEEP_FEATURES].values.astype(np.float32)\n ytrain = y_train.values.astype(np.float32)\n del X_train, y_train\n gc.collect()\n d_train = lgbm.Dataset( xtrain, label=ytrain, feature_name=KEEP_FEATURES )#+ ['session_id'])#, categorical_feature=CAT_FEATURES )\n del xtrain, ytrain\n gc.collect()\n xval = X_valid[KEEP_FEATURES].values.astype(np.float32)\n yval = y_valid.values.astype(np.float32)\n del X_valid, y_valid\n gc.collect()\n d_valid = lgbm.Dataset( xval, label=yval, feature_name=KEEP_FEATURES )#+ ['session_id'])#, categorical_feature=CAT_FEATURES )\n del xval, yval\n gc.collect()\n \n print( 'create sets in {}'.format( (time.time() - tstart) ) )\n tstart = time.time()\n \n watchlist = [d_train, d_valid]\n\n params = {}\n params['boosting'] = 'dart'\n params['learning_rate'] = 0.1\n if LTR:\n params['application'] = 'lambdarank'\n params['metric'] = 'ndcg'\n params['eval_at'] = '30'\n #params['group_column'] = 'name:session_id'\n else:\n params['application'] = 'binary'\n params['metric'] = 'binary_logloss'\n# params['max_depth'] = 34\n# params['num_leaves'] = 234\n# params['max_bin'] = 485\n# params['feature_fraction'] = 0.202505\n# params['bagging_fraction'] = 0.823505\n# params['min_data_in_leaf'] = 15\n params['feature_fraction'] = 0.5\n params['bagging_fraction'] = 0.5\n #params['bagging_freq'] = 5\n #params['verbosity'] = 0\n \n evals_result = {}\n model = lgbm.train( params, train_set=d_train, num_boost_round=10000, valid_sets=watchlist, early_stopping_rounds=STOPPING, evals_result=evals_result, verbose_eval=10 ) #, feval=mrr )\n\n print( 'train in in {}'.format( (time.time() - tstart) ) )\n tstart = time.time()\n 
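# Training finished: export the feature importances, persist the trained booster, then rebuild the test split for scoring.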
\n# ax = lgbm.plot_metric(evals_result, metric='auc')\n# plt.show()\n \n export_importance( model, KEEP_FEATURES, export=FS is None and FS_IMP is None )\n \n ensure_dir(BASE_PATH + SET + 'lgbm/')\n model.save_model( BASE_PATH + SET + 'lgbm/'+ALGKEY+'.txt' , num_iteration=model.best_iteration)\n \n test = create_set( base_path=BASE_PATH + SET, conf=CONF, key=DSKEY, redo=False )\n test.query('train == 0', inplace=True)\n \n \n X_test = test[ KEEP_FEATURES ]\n y_test = model.predict(X_test, num_iteration=model.best_iteration )\n \n print( 'predict in {}'.format( (time.time() - tstart) ) )\n tstart = time.time()\n \n test['prob'] = y_test\n \n if STACK:\n test[['user_id','session_id','step','timestamp','impressions','prob']].to_csv( BASE_PATH + '/' + SET + '/stacking/teprobs_' + ALGKEY + '.csv' )\n \n y_pred = model.predict( X[ KEEP_FEATURES ] )\n train_stack['prob'] = y_pred\n train_stack[['user_id','session_id','step','timestamp','impressions','prob']].to_csv( BASE_PATH + '/' + SET + '/stacking/trprobs_' + ALGKEY + '.csv' )\n \n \n# truth = pd.read_csv( BASE_PATH + SET + 'truth.csv' )\n# truth['label2'] = 1\n# test = test.merge( truth[['session_id','reference','label2']], left_on=['session_id','impressions'], right_on=['session_id','reference'], how='left' )\n# test['label'] = test['label2'].fillna(0)\n# del test['label2']\n \n test = test.sort_values(['session_id','prob'], ascending=False)\n \n# test.to_csv( BASE_PATH + SET + '/test_examine.csv' )\n \n solution = pd.DataFrame()\n solution['recommendations'] = test.groupby( 'session_id' ).impressions.apply( list )\n solution['confidences'] = test.groupby( 'session_id' ).prob.apply( list )\n solution.reset_index(drop=True)\n solution = solution.merge( test[['session_id', 'user_id', 'timestamp', 'step']].drop_duplicates(keep='last'), on='session_id', how='inner' ) \n solution.to_csv( BASE_PATH + '/' + SET + '/solution_' + ALGKEY + '.csv' )\n \n result = evaluate( solution, base=BASE_PATH, dataset=SET )\n print( result.T )\n\n\ndef mrr( y_hat, train ):\n res = 0\n correct = train.get_label()\n groups = train.get_group()\n gids = list( chain.from_iterable( [ [idx] * num for num,idx in zip( groups, range(len(groups)) ) ] ) )\n pos = list( chain.from_iterable( [ range(1,num+1) for num,idx in zip( groups, range(len(groups)) ) ] ) )\n a = pd.DataFrame( {'y': y_hat, 'correct': correct, 'group': gids } )\n a = a.sort_values( ['group','y'], ascending=[True,False] )\n a['rank'] = pos\n res = np.mean( 1 / a[ a.correct == 1 ]['rank'].values )\n return 'mrr', res, True\n\ndef feature_selection( X, y, k=10 ):\n\n print('feature_selection ',k)\n# var = VarianceThreshold(threshold=(.8 * (1 - .8)))\n# var.fit_transform(X)\n# sup_var = var.get_support(indices=True)\n sup_cat = [ FEATURES.index( f ) for f in CAT_FEATURES ]\n \n# clf = ExtraTreesClassifier(n_estimators=5)\n# selector = SelectFromModel(clf, threshold=0, max_features=k) \n selector = SelectKBest(f_classif, k=k)\n selector.fit(X, y)\n # Get columns to keep\n sup_kbest = selector.get_support(indices=True)\n # Create new dataframe with only desired columns, or overwrite existing\n# print( sup_var )\n print( sup_kbest )\n print( len(FEATURES) )\n# sup_kbest = list( filter( lambda x: x in sup_var or x in sup_cat, sup_kbest) )\n \n tmp = [ FEATURES[i] for i in list(set(sup_cat) | set(sup_kbest)) ]\n print( tmp )\n \n return list(set(sup_cat) | set(sup_kbest))\n\ndef get_features_by_importance( FS_IMP ):\n \n importance = pd.read_csv( BASE_PATH + SET + 'lgbm_importance.csv' )\n features = list( 
importance.sort_values('split').tail(FS_IMP).feature.values )\n print( 'select {} of {} features'.format( len(features), len(FEATURES) ) )\n return features\n \ndef export_importance(model, features, plot=False, export=True):\n \n if export:\n importance_split = model.feature_importance()\n importance_gain = model.feature_importance( importance_type='gain' )\n importance = pd.DataFrame({ 'feature': features, 'split': importance_split, 'gain': importance_gain })\n importance.to_csv( BASE_PATH + SET + 'lgbm_importance.csv', index=False )\n if plot:\n ax = lgbm.plot_importance(model, max_num_features=50)\n plt.show()\n \nif __name__ == '__main__':\n main()\n ","sub_path":"lgbm_single.py","file_name":"lgbm_single.py","file_ext":"py","file_size_in_byte":10212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"160689038","text":"import mysql.connector\n\n\nmydb = mysql.connector.connect(\n ...\n)\n\n\ndef truncate_stg_loader():\n mycursor = mydb.cursor()\n sql = 'TRUNCATE TABLE stg_post_loader'\n mycursor.execute(sql)\n\n\ndef sql_insert_stg_intel(data_frame):\n mycursor = mydb.cursor()\n sql = 'INSERT INTO stg_post_loader (id, date, text, attachments_url, post_url) VALUES (%s, %s, %s, %s, %s)'\n mycursor.executemany(sql, data_frame)\n mydb.commit()\n print(mycursor.rowcount, \"inserted.\")","sub_path":"loader/database_loader.py","file_name":"database_loader.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"416881648","text":"#!/usr/local/bin/python3.7\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom numerical_algorithms.runge_kutta import main as rk4\n\nt0 = 0\ny0 = np.array([1])\nf = lambda y, x: -1 * y\nh = 0.01\nn = 500\nf_args = {}\n\ny, t = rk4(x0=t0, y0=y0, f=f, h=h, n=n, f_args=f_args)","sub_path":"three_body_problem/runge_kutta/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"390228893","text":"'''\nFaça um programa que leia um número inteiro e mostra na tela o seu sucessor e seu antecessor\n'''\n\nnumero = int(input(\"Digite um número: \"))\n\nnumero1 = numero + 1\nnumero2 = numero - 1\n\nprint(\"O antecessor de {} é {}, e o sucessor é {}.\" .format(numero, numero2, numero1))\n","sub_path":"Exercicios/Aula07/005.py","file_name":"005.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"13913639","text":"import json\nimport os\nimport os.path\n\nimport datetime\nimport operator\nimport time\nimport pytz\nimport requests\n\nfrom codecs import open\n\nfrom pelican import signals\nfrom pelican.generators import Generator\n\nfrom jinja2 import BaseLoader, TemplateNotFound\n\ndef get_generators(pelican_object):\n return ProductivityGenerator\n\ndef register():\n signals.get_generators.connect(get_generators)\n\nclass ProductivityGenerator(Generator):\n def fetch_rt_data(self):\n a = time.gmtime()\n ndt = datetime.datetime(a.tm_year, a.tm_mon, a.tm_mday, a.tm_hour, a.tm_min)\n utc = pytz.timezone('utc').localize(ndt)\n local_time = utc.astimezone(pytz.timezone(self.settings['TIMEZONE']))\n\n hours = []\n\n for day in range(-29, 0, 3):\n prev_day = local_time + datetime.timedelta(day)\n yd = prev_day + datetime.timedelta(-1)\n nd = prev_day + datetime.timedelta(1)\n \n query = {'rb':yd.strftime('%Y-%m-%d'),\n 
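# 'rb'/'re' bound the request to the day before and after each sampled date; 'rk' selects the productivity report.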
're':nd.strftime('%Y-%m-%d'),\n 'rk':'productivity',\n 'format':'json',\n 'key':self.settings['RESCUETIME_KEY'], }\n\n r = requests.get('https://www.rescuetime.com/anapi/data', params=query)\n data = r.json()\n\n seconds = 0\n for row in data['rows']:\n if row[3] >= 1:\n seconds += row[1]\n hours.append({'day':prev_day.strftime('%m-%d'), 'hour':float(seconds/108)/100.})\n \n self.context['productive_hours'] = hours\n\n def generate_context(self):\n self.fetch_rt_data()\n\n def generate_output(self, writer):\n for source, dest in self.settings['PROD_PAGE'].items():\n self.env.loader.loaders.insert(0, _FileLoader(source, self.path))\n try:\n template = self.env.get_template(source)\n rurls = self.settings['RELATIVE_URLS']\n writer.write_file(dest, template, self.context, rurls,\n override_output=True)\n finally:\n del self.env.loader.loaders[0]\n\nclass _FileLoader(BaseLoader):\n\n def __init__(self, path, basedir):\n self.path = path\n self.fullpath = os.path.join(basedir, path)\n\n def get_source(self, environment, template):\n if template != self.path or not os.path.exists(self.fullpath):\n raise TemplateNotFound(template)\n mtime = os.path.getmtime(self.fullpath)\n with open(self.fullpath, 'r', encoding='utf-8') as f:\n source = f.read()\n return (source, self.fullpath,\n lambda: mtime == os.path.getmtime(self.fullpath))\n\n","sub_path":"productivity.py","file_name":"productivity.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"613740595","text":"# python version 3.8\r\n\r\nfrom PyQt5.QtWidgets import *\r\n\r\nclass HEADER_of_3DS:\r\n def __init__(self,\r\n name=None,\r\n poltype=None,\r\n thin=None,\r\n thout=None,\r\n phi=None,\r\n ominc=None,\r\n eloss=None,\r\n gamma_c=None,\r\n gamma_f=None,\r\n scattering_axis=None,\r\n eval_i=None,\r\n eval_n=None,\r\n trans_op=None,\r\n gs_list=None,\r\n temperature=None,\r\n spectra=None):\r\n self.name = name if name is not None else \"\"\r\n self.poltype = poltype if poltype is not None else \"\" # (str,str)\r\n self.thin = thin # float\r\n self.thout = thout if thout is not None else \"\" # float\r\n self.phi = phi # float\r\n self.ominc = ominc if ominc is not None else [] # list of float\r\n self.eloss = eloss if eloss is not None else [] # list of float\r\n self.gamma_c = gamma_c if gamma_c is not None else [] # list of float\r\n self.gamma_f = gamma_f if gamma_f is not None else [] # list of float\r\n self.scattering_axis = scattering_axis if scattering_axis is not None else [[]] # list of list\r\n self.eval_i = eval_i if eval_i is not None else [] # list of list\r\n self.eval_n = eval_n if eval_n is not None else [] # list of list\r\n self.trans_op = trans_op if trans_op is not None else [[]] # list of list\r\n self.gs_list = gs_list if gs_list is not None else [] # list\r\n self.temperature = temperature if temperature is not None else \"\" # float\r\n self.spectra = spectra if spectra is not None else \"{}\"\r\n\r\nclass DataManager_3DS:\r\n def __init__(self):\r\n self.spectraBasicDataList = {}\r\n self.currentSpectraBasicData = {}\r\n\r\n def getNameFromSpectraData(self) -> str:\r\n return \"\"\r\n\r\n def addSpectraData(self, spectraData) -> bool:\r\n return False\r\n\r\n def getSpectraDataByName(self, name: str):\r\n if name in self.spectraBasicDataList.keys():\r\n return self.spectraBasicDataList[name]\r\n else:\r\n return None\r\n\r\ndef Read3DSinWindow(mainWindow=None) -> HEADER_of_3DS or None:\r\n fileName, fileType = 
QFileDialog.getOpenFileName(mainWindow, r'Load json',\r\n r'D:\\Users\\yg\\PycharmProjects\\spectra_data',\r\n r'json Files(*.json)')\r\n if fileName == \"\":\r\n return None\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass","sub_path":"DataManager.py","file_name":"DataManager.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"428034301","text":"import torch\nimport random\nfrom torch.autograd import Variable\n\ndef getpAtN(network_x,network_y):\n f_test = open(\"twitter_foursquare_groundtruth/groundtruth.9.foldtrain.test.number\")\n # f_test = open(\"twitter_foursquare_groundtruth/groundtruth.9.foldtrain.train.number\")\n pAtN_x_map=dict()\n\n print('-------------------------')\n line = f_test.readline()\n all = 0\n i = 0\n while line:\n target = 0\n array_edge = line\n array_edge = array_edge.replace(\"\\n\", \"\")\n y = array_edge + \"_twitter\"\n x = array_edge + \"_foursquare\"\n\n if x in network_x.keys() and y in network_y.keys():\n sam = torch.cosine_similarity(network_x[x], network_y[y], dim=0)\n for value in network_y.values():\n if (torch.cosine_similarity(network_x[x], value, dim=0).double() > sam.double()):\n target += 1\n pAtN_x_map[array_edge]=target\n all += 1\n line = f_test.readline()\n i += 1\n f_test.close()\n return pAtN_x_map\n\n\ndef getpAtN_Revers(network_y, network_x):\n f_test = open(\"twitter_foursquare_groundtruth/groundtruth.9.foldtrain.test.number\")\n # f_test = open(\"twitter_foursquare_groundtruth/groundtruth.9.foldtrain.train.number\")\n pAtN_y_map = dict()\n\n print('-------------------------')\n line = f_test.readline()\n all = 0\n i = 0\n while line:\n target = 0\n array_edge = line\n array_edge = array_edge.replace(\"\\n\", \"\")\n y = array_edge + \"_twitter\"\n x = array_edge + \"_foursquare\"\n\n if x in network_x.keys() and y in network_y.keys():\n sam = torch.cosine_similarity(network_x[x], network_y[y],dim=0)\n for value in network_x.values():\n if (torch.cosine_similarity(network_y[y], value,dim=0).double() > sam.double()):\n target += 1\n pAtN_y_map[array_edge] = target\n all += 1\n line = f_test.readline()\n i += 1\n f_test.close()\n return pAtN_y_map,all\ndef test():\n a=[0]*30\n b=[0]*30\n # 读取自己的anchor文件\n f_networkx = open(\"foursquare/embeddings/emb-23.number\")\n f_networky = open(\"twitter/embeddings/emb-23.number\")\n\n network_x=dict()\n network_y=dict()\n\n\n line=f_networkx.readline()\n while line:\n listx = []\n line=line.replace(\"|\\n\",\"\")\n sp=line.split(\" \",1)\n vector_array=sp[1].split(\"|\",127)\n for x in vector_array:\n listx.append(x)\n listx=list(map(float,listx))\n vector=change2tensor(listx)\n network_x[sp[0]]=vector\n line=f_networkx.readline()\n f_networkx.close()\n\n line = f_networky.readline()\n while line:\n listy = []\n line = line.replace(\"|\\n\", \"\")\n sp = line.split(\" \", 1)\n vector_array = sp[1].split(\"|\", 127)\n for y in vector_array:\n listy.append(y)\n listy = list(map(float, listy))\n vector = change2tensor(listy)\n network_y[sp[0]] = vector\n line = f_networky.readline()\n f_networky.close()\n\n map_x=getpAtN(network_x,network_y)\n map_y,all=getpAtN_Revers(network_y,network_x)\n\n for i in range(30):\n for value in map_x.values():\n if value==i:\n a[i]+=1\n for i in range(30):\n for value in map_y.values():\n if value==i:\n b[i]+=1\n\n for i in range(30):\n a[i]/=all\n for i in range(30):\n b[i]/=all\n\n for i in range(1,30):\n a[i]+=a[i-1]\n b[i]+=b[i-1]\n for i in range(30):\n 
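# a[i] and b[i] now hold cumulative hit rates per rank cutoff for the two matching directions; print their average as the p@N curve.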
print(i,':',(a[i]+b[i])/2,end=', ')\ndef change2tensor(list):\n list = torch.Tensor(list)\n list = list.squeeze()\n list = Variable(list)\n return list\n\nif __name__ == '__main__':\n test()","sub_path":"AcrossNetworkEmbeddingData/testqAtN.py","file_name":"testqAtN.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"403849833","text":"#!/usr/bin/python\n# -*- coding: Utf8 -*-\n\n\"\"\"\n@date: 11/04/2013\n@author: De Ryck Aurélien\n\"\"\"\nimport RPi.GPIO as GPIO\nimport SPI.SPI as SPI\nimport SPI.SimSPI as SimSPI #appel une classe qui simule la communication spi\nimport variable, logging, Tache , time\nimport Servomoteur, Motor, Capteur\nimport Choix_Tache\n\nclass initialisation(object):\n\t\"\"\"\n\tclasse initialisation\n\t=====================\n\tCette classe vérifie le robot avant le début du tournois,\n\telle doit demander à tout les servomteurs de bouger, faire bouger le robot\n\t@note : cette phase de déplacement pourra être utilisée pour calibrer la position du robot\n\t\n\t\"\"\"\n\tdef __init__(self):\n\t\t#initialisation de la communication\n\n\t\tif variable.DEBUG == True:\n\t\t\tlogging.warning(\"Le programme a été lancé en mode DEBUG\")\n\t\t\tvariable.Communication = SimSPI.SimSPI()\n\n\t\telse:\n\t\t\tvariable.Communication = SPI.SPI() \n\t\t\t\n\t\t\tGPIO.setmode(GPIO.BOARD)\n\t\t\tGPIO.setup(16, GPIO.IN) #color\n\t\t\tGPIO.setup(10, GPIO.IN) #strategie\n\t\t\tGPIO.setup(15, GPIO.IN)\t#init\n\t\t\tGPIO.setup(13, GPIO.IN)\t#start\n\n\t\t#test si la connection avec la DE0-NANO est bonne\n\t\t#si elle vaut 255, c'est que la conection est mauvaise.\n\t\t#elle doit valoir 170 pour être certain que le programme à bien été charger dans le FPGA \n\t\ta = 0\n\t\twhile variable.Communication.send(1, 0, 170) != 170:\n\t\t\tif a == 0:\n\t\t\t\t\tlogging.critical(\"la deo-nano ne répond pas\")\n\t\t\t\t\ta = 1\n\t\tlogging.info(\"le FPGA a répondu\")\n\n\t\t#reset fpga pour fixer les valeurs par défaut\n\t\tvariable.Communication.send(False, variable.adrReset, 0)\n\t\ttime.sleep(0.5)\n\t\tvariable.Communication.send(False, variable.adrReset, 1)\n\t\tlogging.info(\"reset effectué\")\n\n\t\tself.adrstart = 1 \n\t\tvariable.tempo = 0.6 # toute les 0.2 secondes, il va executer l'action duringrun\n\n\t\tif variable.tpstimer != 90:\n\t\t\tlogging.warning(\"le timer est de %f seconde au lieu de 90 seconde\" % variable.tpstimer)\n\n\t\t#création des différents modules \n\t\tvariable.Motors = Motor.Motors() #initialise les moteurs\n\t\tvariable.Capteurs = Capteur.Capteurs() #initialise les capteurs \n\t\tvariable.Servomoteur = Servomoteur.Servomoteur() #initialise les servomoteurs\n\n\t\t#pas implémentée\n\t\t#variable.Choix_Tache = Choix_Tache.ChoixTache() #initialise les taches \n\t\t#variable.selectTask = variable.Choix_Tache.listTache[0] #prend la tache 0 comme tache de départ\n\n\t\t\"\"\"\n\t\tpermet d'activer différents modules de tests :\n\t\t - configuration : pour utilisé la carte qui détermine la couleur de l'équipe et la stratégie\n\t\t - test servo : pour vérifier que le bras répond correctement\n\t\t - test moteur : pour vérifier que les moteurs fonctionnent correctement \n\t\t\"\"\"\n\t\t#self.configuration()\n\t\t#self.testServo()\n\t\t#self.testMoteur()\n\t\t\n\tdef configuration(self):\n\t\t\"\"\"\n\t\t@note : non testé\n\t\tattend le switch qui active la phase initialisation\n\t\t\"\"\"\n\t\tStartinit = 1\n\t\t#attend qu'on active le switch init\n\t\twhile Startinit == 
1:\n\t\t\t#détermination de la couleur de l'équipe\n\t\t\tcolor = GPIO.input(16)\n\t\t\tstrategie = GPIO.input(10)\n\t\t\tStartinit = GPIO.input(15)\n\t\t\t#ralentie la vitesse de la boucle \n\t\t\ttime.sleep(0.2)\n\t\t\n\t\tif color == 0:\n\t\t\tlogging.info(\"le robot est bleu \")\n\t\t\tvariable.ColorTeam = \"Blue\"\n\t\telse :\n\t\t\tlogging.info(\"le robot est rouge \")\n\t\t\tvariable.ColorTeam = \"Red\"\n\n\t\tif strategie== True :\n\t\t\tlogging.info(\"La startegie choisi est l'homologation \") \n\t\telse:\n\t\t\tlogging.info(\"La startegie choisi est la compétition \") \n \n\t\t#fixe le point et l'angle de départ\n\t\t#variable.pointactuel = Tache.Point([0,1300,600,'[]',1]) \n\t\tvariable.pointactuel = Tache.Point([1,1300,600,'[]',1]) \n\t\tvariable.angleactuel = -90\n\t\t#si on appratient à l'équipe bleu : il faut adapter les coordonnées\n\t\tif variable.ColorTeam == 'Blue':\n\t\t\tvariable.pointactuel.x *= -1\n\t\t\tvariable.angleactuel *= -1\n\t\tlogging.info(\"le robot va démarrer du point [%i,%i] et avec un angle de %i\"%(variable.pointactuel.x, variable.pointactuel.y, variable.angleactuel))\n\n\tdef testServo(self):\n\t\t\"\"\"\n\t\tDoit effectuer tout les actions prévus pour les servomteurs\n\t\t@note : non testé\n\t\t\"\"\"\n\t\t#initialise la classe servo\n\t\tservo = Servomoteur.Servomoteur()\n\t\tfor action in servo.actions:\n\t\t\tvariable.Communication.send(False, servo.adrservo, action)\n\t\t\tencours = 0\n\t\t\twhile encours == 0:\n\t\t\t\tencours = variable.Communication.send(True, servo.adrservorun)\n\t\t\t\ttime.sleep(0.1)\n\t\t\twhile encours == 1:\n\t\t\t\tencours = variable.Communication.send(True, servo.adrservorun)\n\t\t\tlogging.info(\"Le robot a effectuer l'action %i sur les servo\" %action )\n\n\t\tlogging.info(\"Test des moteurs\")\n\t\tvariable.Motors.PrepareToSend(angle = 0, distance = 200 , recul = False)\n\n\tdef testMoteur(self):\n\t\t\"\"\"\n\t\t@note : non testé\n\t\t\"\"\"\n\t\t#attend que les moteurs se mettent en route\n\t\tencours = 0\n\t\twhile encours == 0: \n\t\t\tencours = variable.Communication.send(True, variable.Motors.adrReady)\n\t\t#les moteurs ont démarrés\n\t\twhile encours == 1:\n\t\t\tencours = variable.Communication.send(True, variable.Motors.adrReady)\n\n\t\tlogging.info(\"Le robot a avancé de 20cm\")\n\t\t\n\t\tvariable.Motors.PrepareToSend(angle = 0, distance = 200 , recul = True)\n\t\t#attend que les moteurs se mettent en route\n\t\tencours = 0\n\n\t\twhile encours == 0: \n\t\t\tencours = variable.Communication.send(True, variable.Motors.adrReady)\n\t\t#les moteurs ont démarrés\n\t\twhile encours == 1:\n\t\t\tencours = variable.Communication.send(True, variable.Motors.adrReady)\n\n\t\tlogging.info(\"Le robot a reculé de 20cm\")\n","sub_path":"initialisation.py","file_name":"initialisation.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"333909835","text":"from odoo import models,api,fields \n\nclass VistaEnviarMensaje(models.TransientModel):\n _name = \"vista.enviarmensaje\"\n mensaje = fields.Char(\"Mensaje\")\n celular = fields.Char(\"Celular\")\n company_id = fields.Many2one(\"res.company\")\n partner_id = fields.Many2one(\"res.partner\",string=\"Cliente\")\n \n \"\"\"\n def _default_company(self):\n company_id = self.env['res.company']._company_default_get('aws_sns_sms')\n return company_id\n \"\"\"\n def btn_enviar_mensaje(self):\n phonenumber = self.celular\n message = self.mensaje\n multiple_mensajes = True\n try:\n 
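# Send through the company's SNS helper and log success; on failure the exception text is stored as the log entry's state instead.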
self.company_id.send_sms(phonenumber,message,multiple_mensajes)\n self.env['aws_sns_sms.sms_log'].create({\n 'phonenumber': phonenumber,\n 'message': message,\n 'state': 'Enviado',\n 'company_id': self.company_id.id,\n 'partner_id':self.partner_id.id\n })\n except Exception as e:\n self.env['aws_sns_sms.sms_log'].create({\n 'phonenumber': phonenumber,\n 'message': message,\n 'state': e,\n 'company_id': self.company_id.id,\n 'partner_id':self.partner_id.id\n })\n \n\n\nclass ResPartner(models.Model):\n _inherit = 'res.partner'\n msg_ids = fields.One2many(\"aws_sns_sms.sms_log\",\"partner_id\",string=\"Mensajes\")\n\n\n def btn_vista_envio_mensaje(self):\n view_id = self.env.ref(\"aws_sns_sms.view_form_enviar_mensaje\").id\n return {\n \"type\":\"ir.actions.act_window\",\n \"views\":[(view_id,\"form\")],\n \"res_model\":\"vista.enviarmensaje\",\n \"target\":\"new\",\n \"context\":{\n \"default_partner_id\":self.id,\n \"default_celular\":self.mobile,\n \"default_company_id\":self.company_id.id\n }\n }\n ","sub_path":"addons/aws_sns_sms/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"180049492","text":"from flask import Blueprint, render_template, request, redirect, url_for, session\n\nimport AditiFlix_App.users.services as services\nimport AditiFlix_App.adapters.movie_repository as repo\n\n\n\nuser_blueprint = Blueprint(\n 'user_bp', __name__,url_prefix='/user')\n\n\n@user_blueprint.route('/homepage', methods=['GET'])\ndef user_home():\n watchlist = []\n watched = []\n user = None\n try:\n loggedin = session['username']\n user = services.get_user(session['username'], repo.repo_instance)\n loggedin = True\n watchlist = user.watchlist\n watched = user.watched_movies\n username = user.username\n except:\n loggedin = False\n return render_template(\n 'user.html',\n loggedin=loggedin,\n movieList=watchlist,\n watchedList=watched,\n user=user,\n user_homepage=url_for('user_bp.user_home')\n )","sub_path":"AditiFlix_App/users/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"368691065","text":"import json\n\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.detail import DetailView\n\nfrom .models import Brand, ProductItem, ProductGroup, SexType, ProductType, SubProductType, UserLike, CartItem, Size, \\\n Product\nfrom .modules import soho_products\nfrom .modules.authentication import create_profile, is_content_provider\nfrom .modules.brands import put_navigation_menu_brands\nfrom .modules.pagination import get_paginated_objects, get_url_parameters\nfrom .modules.themes import put_theme_settings\n\nMINIMUM_USERNAME_LENGTH = 3\nMINIMUM_PASSWORD_LENGTH = 5\n\n\nclass ShopView:\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n put_navigation_menu_brands(context)\n 
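# Shared context for every shop page: brand menu, product groups/types, theme settings, and the signed-in user's cart.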
soho_products.put_navigation_menu_groups_and_types(context)\n put_theme_settings(context)\n if self.request.user.is_authenticated:\n context['cart_items'] = soho_products.get_cart_items(self.request.user)\n return context\n\n\n@method_decorator(ensure_csrf_cookie, name='dispatch')\nclass BrandsListView(ShopView, ListView):\n model = Brand\n template_name = 'shop/brands/Brands.html'\n paginate_by = 12\n\n\n@method_decorator(ensure_csrf_cookie, name='dispatch')\nclass BrandDetailView(ShopView, DetailView):\n model = Brand\n template_name = 'shop/brands/Brand.html'\n\n def get_object(self, queryset=None):\n return get_object_or_404(\n Brand, name=self.kwargs['name']\n )\n\n\n@method_decorator(ensure_csrf_cookie, name='dispatch')\nclass ProductItemDetailView(ShopView, DetailView):\n model = ProductItem\n template_name = 'shop/products/Product.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n product = self.object.product\n product_items = ProductItem.objects.filter(product=product)\n context['items'] = product_items\n categorizations = []\n categorizations_info = {}\n for category in product.categories.all():\n categorization = category.categorization\n if not (categorization in categorizations):\n categorizations.append(categorization)\n categorizations_info[str(categorization.pk)] = [\n category,\n ]\n else:\n categorizations_info[str(categorization.pk)].append(category)\n categorizations_list = []\n for categorization in categorizations:\n categorizations_list.append(\n {\n 'farsi_name': categorization.farsi_name,\n 'categories': categorizations_info[str(categorization.pk)]\n }\n )\n context['categorizations'] = categorizations_list\n sizes = self.object.sizes\n size = self.request.GET.get('size')\n if size:\n size = sizes.get(farsi_name=size)\n else:\n size = sizes.all()[0]\n context['selected_size'] = size\n\n return context\n\n\n@method_decorator(ensure_csrf_cookie, name='dispatch')\nclass ProductsView(ShopView, TemplateView):\n def get_product_items(self, **kwargs):\n products = self.get_products(**kwargs)\n size = self.request.GET.get('size')\n color = self.request.GET.get('color')\n category = self.request.GET.get('category')\n brand = self.request.GET.get('brand')\n product_items = soho_products.filter_product_items(products, size, color, category, brand)\n return product_items\n\n def get_pagination_parameters(self, context):\n pagination_parameters = []\n if context['selected_color']:\n pagination_parameters.append(\n {'name': 'color', 'value': context['selected_color']}\n )\n if context['selected_size']:\n pagination_parameters.append(\n {'name': 'size', 'value': context['selected_size']}\n )\n if context['selected_category']:\n pagination_parameters.append(\n {'name': 'category', 'value': context['selected_category']}\n )\n return pagination_parameters\n\n def set_url_parameters(self, context):\n context['selected_size'] = self.request.GET.get('size')\n context['selected_color'] = self.request.GET.get('color')\n context['selected_category'] = self.request.GET.get('category')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n products = self.get_products(**kwargs)\n context['sizes'] = soho_products.get_products_sizes(products)\n context['colors'] = soho_products.get_products_colors(products)\n context['categorizations'] = soho_products.get_products_categorizations(products)\n self.set_url_parameters(context)\n context['product_items'] = get_paginated_objects(\n self.get_product_items(**kwargs), 
self.request\n )\n pagination_parameters = self.get_pagination_parameters(context)\n context['link_extras'] = get_url_parameters(pagination_parameters)\n return context\n\n\nclass ProductsGroupView(ProductsView):\n template_name = 'shop/products/Products.html'\n\n def get_products(self, **kwargs):\n return soho_products.get_all_products(kwargs['group'], kwargs['sex'])\n\n def get_pagination_parameters(self, context):\n pagination_parameters = super().get_pagination_parameters(context)\n if context['selected_brand']:\n pagination_parameters.append(\n {'name': 'brand', 'value': context['selected_brand']}\n )\n return pagination_parameters\n\n def set_url_parameters(self, context):\n super().set_url_parameters(context)\n context['selected_brand'] = self.request.GET.get('brand')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['group'] = get_object_or_404(ProductGroup, farsi_name=kwargs['group'])\n context['sex_type'] = get_object_or_404(SexType, farsi_name=kwargs['sex'])\n soho_products.put_product_types(\n soho_products.get_all_products(\n kwargs['group'], kwargs['sex']\n ), context\n )\n products = self.get_products(**kwargs)\n context['products_brands'] = soho_products.get_products_brands(products)\n return context\n\n\nclass BrandProductsView(ProductsView):\n template_name = 'shop/brands/Brand.html'\n\n def get_products(self, **kwargs):\n return soho_products.get_all_brand_products(kwargs['name'])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['brand'] = get_object_or_404(Brand, name=kwargs['name'])\n soho_products.put_product_types(\n soho_products.get_all_brand_products(kwargs['name']), context\n )\n return context\n\n\nclass LikedProductsView(LoginRequiredMixin, ProductsView):\n template_name = 'shop/products/LikedProducts.html'\n\n def get_products(self, **kwargs):\n return soho_products.get_all_liked_products(self.request.user)\n\n def get_pagination_parameters(self, context):\n pagination_parameters = super().get_pagination_parameters(context)\n if context['selected_brand']:\n pagination_parameters.append(\n {'name': 'brand', 'value': context['selected_brand']}\n )\n return pagination_parameters\n\n def set_url_parameters(self, context):\n super().set_url_parameters(context)\n context['selected_brand'] = self.request.GET.get('brand')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n soho_products.put_product_types(\n soho_products.get_all_liked_products(self.request.user), context\n )\n products = self.get_products(**kwargs)\n context['products_brands'] = soho_products.get_products_brands(products)\n return context\n\n\nclass ProductTypeView:\n def get_product_type_products(self, products, **kwargs):\n product_type = get_object_or_404(\n ProductType, farsi_name=kwargs['product_type']\n )\n products = soho_products.filter_product_type(products, product_type)\n return products\n\n def put_selected_product_type(self, context, **kwargs):\n product_type = get_object_or_404(\n ProductType, farsi_name=kwargs['product_type']\n )\n context['selected'] = product_type\n\n\nclass GroupProductTypeView(ProductsGroupView, ProductTypeView):\n def get_products(self, **kwargs):\n products = super().get_products(**kwargs)\n return self.get_product_type_products(products, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n super().put_selected_product_type(context, **kwargs)\n return context\n\n\nclass 
BrandProductTypeView(BrandProductsView, ProductTypeView):\n def get_products(self, **kwargs):\n products = super().get_products(**kwargs)\n return self.get_product_type_products(products, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n super().put_selected_product_type(context, **kwargs)\n return context\n\n\nclass LikedProductTypeView(LikedProductsView, ProductTypeView):\n def get_products(self, **kwargs):\n products = super().get_products(**kwargs)\n return self.get_product_type_products(products, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n super().put_selected_product_type(context, **kwargs)\n return context\n\n\nclass SubProductTypeView:\n def get_sub_product_type_products(self, products, **kwargs):\n sub_product_type = get_object_or_404(\n SubProductType, farsi_name=kwargs['sub_product_type']\n )\n products = soho_products.filter_sub_product_type(products, sub_product_type)\n return products\n\n def put_selected_sub_product_type(self, context, **kwargs):\n sub_product_type = get_object_or_404(\n SubProductType, farsi_name=kwargs['sub_product_type']\n )\n context['selected'] = sub_product_type\n return context\n\n\nclass GroupSubProductTypeView(GroupProductTypeView, SubProductTypeView):\n def get_products(self, **kwargs):\n products = super().get_products(**kwargs)\n return self.get_sub_product_type_products(products, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n super().put_selected_sub_product_type(context, **kwargs)\n return context\n\n\nclass BrandSubProductTypeView(BrandProductTypeView, SubProductTypeView):\n def get_products(self, **kwargs):\n products = super().get_products(**kwargs)\n return self.get_sub_product_type_products(products, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n super().put_selected_sub_product_type(context, **kwargs)\n return context\n\n\nclass LikedSubProductsTypeView(LikedProductTypeView, SubProductTypeView):\n def get_products(self, **kwargs):\n products = super().get_products(**kwargs)\n return self.get_sub_product_type_products(products, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n super().put_selected_sub_product_type(context, **kwargs)\n return context\n\n\nclass LikeProduct(LoginRequiredMixin, View):\n def post(self, request, pk):\n user_like, created = UserLike.objects.get_or_create(\n user=request.user, product_item=get_object_or_404(\n ProductItem, pk=pk\n )\n )\n return JsonResponse(\n {\n 'status': 'ok',\n 'created': created,\n }\n )\n\n\nclass RemoveLike(LoginRequiredMixin, View):\n def post(self, request, pk):\n user_like = get_object_or_404(\n UserLike, user=request.user, product_item=get_object_or_404(\n ProductItem, pk=pk\n )\n )\n user_like.delete()\n return JsonResponse(\n {\n 'status': 'ok',\n 'deleted': True,\n }\n )\n\n\nclass UpdateShoppingCart(LoginRequiredMixin, View):\n def post(self, request):\n CartItem.objects.filter(user=request.user).delete()\n cart_items = request.POST.getlist('cart_items[]')\n for cart_item in cart_items:\n cart_item = json.loads(cart_item)\n product_item_pk = int(cart_item['pk'])\n size_pk = int(cart_item['size'])\n quantity = int(cart_item['quantity'])\n size = get_object_or_404(Size, pk=size_pk)\n product_item = get_object_or_404(\n ProductItem, pk=product_item_pk, sizes=size\n )\n CartItem.objects.create(\n user=request.user,\n 
product_item=product_item,\n size=size,\n quantity=quantity\n )\n return JsonResponse(\n {\n 'status': 'ok'\n }\n )\n\n\nclass UpdateProductDescription(LoginRequiredMixin, View):\n def post(self, request, pk):\n if is_content_provider(request.user):\n product_description = request.POST.get('product-description')\n product = get_object_or_404(\n Product, pk=pk\n )\n product.product_description = product_description\n product.save()\n return HttpResponseRedirect(\n reverse(\n 'shop:product_page',\n kwargs={'pk': product.productitem_set.all()[0].pk}\n )\n )\n return HttpResponseRedirect(reverse('shop:home'))\n\n\n@ensure_csrf_cookie\ndef home(request):\n items = ProductItem.objects.all()\n context = {\n 'items': items,\n }\n put_navigation_menu_brands(context)\n soho_products.put_navigation_menu_groups_and_types(context)\n put_theme_settings(context)\n if request.user.is_authenticated:\n context['cart_items'] = soho_products.get_cart_items(request.user)\n return render(request, 'shop/home/Home.html', context)\n\n\n# TODO -> You have to show a 'successfully logged in' message to user\ndef logged_in(request):\n return HttpResponseRedirect(reverse('shop:home'))\n\n\ndef soho_logout(request):\n if request.user.is_authenticated:\n logout(request)\n return HttpResponseRedirect(reverse('shop:home'))\n\n\ndef signup(request):\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse('shop:home'))\n username_error = False\n password_error = False\n password_repeat_error = False\n username_exists_error = False\n username = ''\n male = False\n female = True\n homosexual = False\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n gender = int(request.POST.get('gender'))\n if gender == 1:\n male = True\n female = False\n homosexual = False\n elif gender == 2:\n homosexual = True\n male = False\n female = False\n password_repeat = request.POST.get('password-repeat')\n if (not username) or (len(username) < MINIMUM_USERNAME_LENGTH):\n username_error = True\n if (not password) or (len(password) < MINIMUM_PASSWORD_LENGTH):\n password_error = True\n if password != password_repeat:\n password_repeat_error = True\n if User.objects.filter(username=username).exists():\n username_exists_error = True\n if not (username_error or password_error or password_repeat_error or username_exists_error):\n User.objects.create_user(username=username, password=password)\n user = authenticate(username=username, password=password)\n create_profile(user, male, female, homosexual)\n login(request, user)\n return HttpResponseRedirect(reverse('shop:home'))\n\n context = {\n 'username': username,\n 'min_username_length': MINIMUM_USERNAME_LENGTH,\n 'min_password_length': MINIMUM_PASSWORD_LENGTH,\n 'male': male,\n 'female': female,\n 'homosexual': homosexual,\n 'username_error': username_error,\n 'password_error': password_error,\n 'password_repeat_error': password_repeat_error,\n 'username_exists': username_exists_error,\n }\n put_navigation_menu_brands(context)\n soho_products.put_navigation_menu_groups_and_types(context)\n put_theme_settings(context)\n return render(request, 'shop/authentication/Signup.html', context)\n\n\ndef signin(request):\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse('shop:home'))\n authentication_failed = False\n username = ''\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if 
user is not None:\n if user.is_active:\n login(request, user)\n next_url = request.POST.get('next', None)\n if next_url:\n return HttpResponseRedirect(next_url)\n else:\n return HttpResponseRedirect(reverse('shop:home'))\n else:\n authentication_failed = True\n context = {\n 'username': username,\n 'authentication_failed': authentication_failed,\n }\n put_navigation_menu_brands(context)\n soho_products.put_navigation_menu_groups_and_types(context)\n put_theme_settings(context)\n return render(request, 'shop/authentication/Login.html', context)\n","sub_path":"soho/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"354928206","text":"#Import inbuilt module\nimport os\n\n#Import MySQL module for mysql connectivity\nimport MySQLdb\nimport pandas as pd\nfrom pandas.core.common import flatten\n\n#Import Airflow module and Operator\nfrom airflow import DAG\nfrom airflow.contrib.operators.ssh_operator import SSHOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom datetime import timedelta,datetime\nfrom airflow.models import Variable\n\ndefault_args = {\n\t'owner': 'airflow',\n\t'depends_on_past': False,\n\t'start_date': datetime.today(),\n\t'retries': 1,\n\t'retry_delay': timedelta(minutes=1),\n\t}\n\ndag = DAG(\n\tdag_id='GDM_Sneha',\n\tdefault_args=default_args,\n\tdescription='A simple Dynamic DAG Example',\n\tschedule_interval=timedelta(days=1))\n\n#task_table_name=\"gdm_tasks_load1\"\n#flag_table_name=\"flags\"\n#etl_type_table_name=\"etl_type\"\n#metadata_host = Variable.get(\"gdm_host\")\n#metadata_user = Variable.get(\"gdm_user\")\n#metadata_passwd = Variable.get(\"gdm_passwd\")\n#metadata_db = Variable.get(\"gdm_db\")\nssh_conn_id = \"ssh_emr\"\n\n#db = MySQLdb.connect(host=metadata_host, user=metadata_user, passwd=metadata_passwd, db=metadata_db)\n#df = pd.read_csv(\"select * from tml_etl_metadata.\"+task_table_name, con=db)\n#etl_task_type_df = pd.read_sql(\"select * from tml_etl_metadata.\"+etl_type_table_name, con=db)\n\t\ndf = pd.read_csv(\"~/test_gdm.csv\",keep_default_na = False)\netl_task_type_df = pd.read_csv(\"~/test2.csv\",keep_default_na = False)\n\nt1 = DummyOperator(\n\t\t\t\ttask_id='EXTRACT_COMPLETE',\n\t\t\t\tdag=dag)\n\nt2 = DummyOperator(\n\t\t\t\ttask_id='MERGE_COMPLETE',\n\t\t\t\tdag=dag)\n\t\t\t\t\nt9 = DummyOperator(\n\t\t\t\ttask_id='LOAD_COMPLETE',\n\t\t\t\tdag=dag)\n\t\t\t\t\t\n#t1 >> t2 >> t9\n\n\nfor idx,row in df.iterrows():\n\n\ttable_name = row['TABLE_NAME']\n\ttable_type = row['TABLE_TYPE']\n\tetl_task_type = row['ETL_TASK_TYPE'].split('|')\n\tetl_proc_wid = row['ETL_PROC_WID']\n\tlast_warehouse = row['CURRENT_DB']\n\tstaging_db = row['STAGING_DB']\n\tdriver_cores = row['DRIVER_CORES']\n\tdriver_mem = row['DRIVER_MEM']\n\texecutor_cores = row['EXECUTOR_CORES']\n\texecutor_mem = row['EXECUTOR_MEM']\n\tnum_executor = row['NUM_EXECUTOR']\n\tadditional_param = row['ADDITIONAL_PARAM']\n\tdependencies = row['DEPENDENCIES'].split('|')\n\tpartitions = row['PARTITIONS']\n\t\n\tfor e in etl_task_type:\n\t\tif e == 'EXTRACT':\n\t\t\tscript_loc = etl_task_type_df.loc[etl_task_type_df['ETL_TASK_TYPE'].str.contains('EXTRACT'), 'SCRIPT_LOC'][0]\n\t\t\tscript_name = etl_task_type_df.loc[etl_task_type_df['ETL_TASK_TYPE'].str.contains('EXTRACT'), 'SCRIPT_NAME'][0]\n\t\t\tcomplete_script_path = script_loc+script_name\n\t\t\t\n\t\t\tt3 = 
SSHOperator(\n\t\t\t\t\tssh_conn_id=ssh_conn_id,\n\t\t\t\t\ttask_id=str(table_name)+'_'+str(e),\t\t\t\t\t\n\t\t\t\t\tcommand= 'spark-submit --num-executors '+str(partitions)+' '+complete_script_path+' '+table_name+' '+str(partitions), \t\n\t\t\t\t\tdag=dag)\n\t\t\tt3 >> t1\n\t\t\n\t\tif e == 'MERGE':\n\t\t\tscript_loc = etl_task_type_df['SCRIPT_LOC'][1]\n\t\t\tscript_name = etl_task_type_df['SCRIPT_NAME'][1]\n\t\t\tcomplete_script_path = script_loc+script_name\n\t\t\t\n\t\t\tt3 = SSHOperator(\n\t\t\t\t\tssh_conn_id=ssh_conn_id,\n\t\t\t\t\ttask_id=str(table_name)+'_'+str(e),\t\t\t\t\t\n\t\t\t\t\tcommand= 'spark-submit --num-executors '+str(num_executor)+' --executor-cores '+str(executor_cores)+' --executor-memory '+executor_mem+' --driver-memory '+driver_mem+' --driver-cores '+str(driver_cores)+' '+complete_script_path+' '+table_name , \t\n\t\t\t\t\tdag=dag)\n\t\t\t\t\t\n\t\t\tt3 >> t2\n\t\t\tt3 << t1\n\t\t\n\t\tif e == 'LOAD':\n\t\t\tscript_loc = etl_task_type_df['SCRIPT_LOC'][1]\n\t\t\tscript_name = etl_task_type_df['SCRIPT_NAME'][1]\n\t\t\tcomplete_script_path = script_loc+script_name\n\t\n\t\t\tt3 = SSHOperator(\n\t\t\t\t\tssh_conn_id=ssh_conn_id,\n\t\t\t\t\ttask_id=str(table_name)+'_'+str(e),\t\t\t\t\t\n\t\t\t\t\tcommand= 'spark-submit --num-executors '+str(num_executor)+' --executor-cores '+str(executor_cores)+' --executor-memory '+executor_mem+' --driver-memory '+driver_mem+' --driver-cores '+str(driver_cores)+' '+complete_script_path+' '+table_name , \t\n\t\t\t\t\tdag=dag)\n\t\n\t\t\tTML_dependencies = [t for t in dependencies if t.startswith('TML_')]\n\t\t\t\n\t\t\tif len(TML_dependencies) == 0:\n\t\t\t\tt9 << t3 << t2\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor d in TML_dependencies:\n\t\t\t\t\tt4 = SSHOperator(\n\t\t\t\t\t\t\tssh_conn_id=ssh_conn_id,\n\t\t\t\t\t\t\ttask_id= d +'_'+str(e),\t\t\t\t\t\n\t\t\t\t\t\t\tcommand= 'spark-submit --num-executors '+str(num_executor)+' --executor-cores '+str(executor_cores)+' --executor-memory '+executor_mem+' --driver-memory '+driver_mem+' --driver-cores '+str(driver_cores)+' '+complete_script_path+' '+table_name , \t\n\t\t\t\t\t\t\tdag=dag)\n\t\t\t\t\tt9 << t3 << t4\n\t\t\t\t\tt4 << t2\t\t\n\n","sub_path":"dags/sneha_1.py","file_name":"sneha_1.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"213249166","text":"import argparse\nimport json\nimport os\nimport shutil\n\nimport otf2\n\nTIMER_GRANULARITY = 1000000 # chrome traces use microseconds\n\n\nclass TensorFlowTrace2OTF2:\n\n def __init__(self, input_file):\n if not input_file or not os.path.isfile(input_file):\n raise Exception(\"No chrome trace found\")\n\n self._input_file = input_file\n self._process_map = {}\n self._function_map = {}\n self._metric_map = {}\n self._dataflow_start = None\n\n def convert_trace(self, output_dir):\n if not output_dir:\n raise Exception(\"No output trace\")\n\n with open(self._input_file) as json_file:\n chrome_data = json.load(json_file)\n with otf2.writer.open(output_dir,\n timer_resolution=TIMER_GRANULARITY) as otf2_trace:\n otf2_root_node = otf2_trace.definitions.system_tree_node(\"root node\")\n otf2_system_tree_node = otf2_trace.definitions.system_tree_node(\"myHost\", parent=otf2_root_node)\n\n for chrome_event in chrome_data['traceEvents']:\n\n # Metadata Events\n if chrome_event['ph'] == 'M' and chrome_event['name'] == 'process_name':\n self.handle_metadata(chrome_event, otf2_system_tree_node, otf2_trace)\n\n # Complete Events, TensorFlow seems to not use B and E events
\n elif chrome_event['ph'] == 'X' and chrome_event['cat'] == \"Op\":\n self.handle_event(chrome_event, otf2_trace)\n\n # Counter Events\n elif chrome_event['ph'] == 'C':\n self.handle_metric(chrome_event, otf2_trace)\n\n # Flow Events (start, step, end)\n elif chrome_event['ph'] in ['s', 't', 'f']:\n self.handle_dataflow(chrome_event)\n\n else:\n print(\"untracked event found: {}\".format(chrome_event))\n\n #TODO Map newly created processes that collect only one metric to the process with the same name\n def handle_metric(self, chrome_event, otf2_trace):\n metric_name = chrome_event['name']\n chrome_process_id = chrome_event['pid']\n chrome_thread_id = chrome_event['tid']\n if metric_name == 'Allocated Bytes':\n if chrome_event['name'] not in self._metric_map:\n self.otf2_add_metric(otf2_trace, metric_name, 'Bytes')\n\n if chrome_thread_id >= len(self._process_map[chrome_process_id]['threads']):\n self.otf2_add_thread(chrome_thread_id, chrome_process_id, otf2_trace)\n\n metric_value = chrome_event['args']['Allocator Bytes in Use']\n otf2_thread = self._process_map[chrome_process_id]['threads'][chrome_thread_id]\n otf2_thread.metric(chrome_event['ts'], self._metric_map[metric_name], metric_value)\n\n def otf2_add_metric(self, otf2_trace, name, unit):\n metric = otf2_trace.definitions.metric(name, unit=unit)\n self._metric_map[name] = metric\n\n def handle_metadata(self, chrome_event, otf2_system_tree_node, otf2_trace):\n otf2_location_group = otf2_trace.definitions.location_group(chrome_event['args']['name'],\n system_tree_parent=otf2_system_tree_node)\n self._process_map[chrome_event['pid']] = {'location': otf2_location_group, 'threads': [],\n 'name': chrome_event['args']['name']}\n\n def handle_event(self, chrome_event, otf2_trace):\n chrome_process_id = chrome_event['pid']\n chrome_thread_id = chrome_event['tid']\n if chrome_thread_id >= len(self._process_map[chrome_process_id]['threads']):\n self.otf2_add_thread(chrome_thread_id, chrome_process_id, otf2_trace)\n\n if not chrome_event['name'] in self._function_map:\n self.otf2_add_function(chrome_event['name'], otf2_trace)\n\n otf2_thread = self._process_map[chrome_process_id]['threads'][chrome_thread_id]\n otf2_function = self._function_map[chrome_event['name']]\n\n begin = chrome_event['ts']\n end = (begin + chrome_event['dur'])\n\n otf2_thread.enter(begin, otf2_function)\n otf2_thread.leave(end, otf2_function)\n\n def otf2_add_thread(self, chrome_thread_id, chrome_process_id, otf2_trace):\n otf2_location_group = self._process_map[chrome_process_id]['location']\n otf2_thread = otf2_trace.event_writer(\n str(self._process_map[chrome_process_id]['name']) + str(chrome_thread_id),\n group=otf2_location_group)\n self._process_map[chrome_process_id]['threads'].append(otf2_thread)\n\n def otf2_add_function(self, name, otf2_trace):\n otf2_function = otf2_trace.definitions.region(name, paradigm=otf2.Paradigm.USER)\n self._function_map[name] = otf2_function\n\n # TODO implementation of dataflow\n def handle_dataflow(self, chrome_event):\n if chrome_event['ph'] == 's':\n if self._dataflow_start is not None:\n print(\"corrupted trace in dataflow: {}\".format(chrome_event))\n self._dataflow_start = None\n self._dataflow_start = chrome_event['id']\n # dataflow handling\n\n elif chrome_event['ph'] == 't':\n if self._dataflow_start != chrome_event['id']:\n print(\"corrupted trace in dataflow: {}\".format(chrome_event))\n # dataflow handling\n\n self._dataflow_start = None\n\n\ndef main():
\n parser = argparse.ArgumentParser(description=\"Convert chrome traces into OTF2\")\n parser.add_argument(\n \"-i\", \"--input\",\n type=str, required=True,\n help=\"chrome tracing file\",\n )\n parser.add_argument(\n \"-o\", \"--output\",\n type=str, required=True,\n help=\"OTF2 Tracing folder\",\n )\n parser.add_argument(\n \"-c\", \"--clean\",\n action=\"store_true\",\n help=\"Clean (delete) the output folder if it exists\",\n )\n args = parser.parse_args()\n\n out_folder = args.output\n if args.clean and os.path.exists(out_folder):\n shutil.rmtree(out_folder)\n\n converter = TensorFlowTrace2OTF2(args.input)\n converter.convert_trace(out_folder)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"crome2otf2.py","file_name":"crome2otf2.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"498943743","text":"# -*- encoding: utf-8 -*-\n'''\n@NAME :Aerosol.py\n@TIME :2021/03/25 11:10:28\n@AUTHOR :MERI\n@Email :mayqueen2016@163.com\n'''\n# Built-in package list\n\nimport math\n\n# Site-package list\nimport scipy\nimport scipy.optimize\nfrom scipy import integrate\nimport numpy as np\nimport pandas as pd\nimport miepython\n\n\n# User-defined package list\n\n\n# CODING CONTENT\n\n# Aerosol optical depth (AOD)\ndef AOD(beta=[], height=[]):\n '''\n Aerosol optical depth: the integral of the medium's extinction coefficient\n over the vertical height; it describes how strongly the aerosol attenuates light.\n '''\n AOD = []\n for idx in range(len(beta)):\n inter_s, err = integrate.quad(beta[idx], height[0], height[-1])\n AOD.append(inter_s)\n return pd.DataFrame(AOD)\n\n\n# Aerosol mass concentration\n\n# STEP 1: compute the extinction efficiency\ndef Q_ext(mj, r_min, r_max, r_step):\n '''\n mj: complex refractive index\n r: diameter, in microns [um]\n * mj and r may be arrays, but they must have the same length\n lambda: wavelength, in microns [um]\n '''\n r = np.arange(r_min, r_max, r_step)\n num_r = r.shape[0] # get the length of r\n\n m = np.empty(num_r, dtype=complex) # create an empty array\n m.fill(mj) # fill it with the complex refractive index\n\n Q_ext, Q_sca, Q_back, g = miepython.mie(m, r)\n print(Q_ext)\n\n\nQ_ext(mj=1.5 - 1j, r_min=0.05, r_max=10.05, r_step=0.05)\n\n\n# STEP 2: inversion of the aerosol particle size distribution (extinction-based remote sensing method)\n# Solving a Fredholm integral equation of the first kind: https://www.guangshi.io/posts/fredholm-equation/\n# core algorithm of non-negative Tikhonov regularization with equality constraint (NNETR)\ndef NNETR(K, f, Delta, epsilon, alpha):\n # the first step\n A_nn = np.vstack((K, alpha * np.identity(K.shape[1])))\n b_nn = np.hstack((f, np.zeros(K.shape[1])))\n\n # the second step\n A_nne = np.vstack((epsilon * A_nn, np.full(A_nn.shape[1], 1.0)))\n b_nne = np.hstack((epsilon * b_nn, 1.0))\n\n # Use NNLS solver provided by scipy\n sol, residue = scipy.optimize.nnls(A_nne, b_nne)\n\n # solution should be divided by Delta (grid size)\n sol = sol / Delta\n return sol, residue\n\n\n# STEP 3: compute the particulate-matter mass concentration\ndef MEE():\n pi = math.pi\n r_max = 10 # [um]\n r_min = 0.01 # [um]\n AOD = []\n rou = 2 # [g/m]\n Q_ext = 2.33\n\n# if __name__ == \"__main__\":\n# print('Start reading RCS data')\n# RCS_PATH = r\"./Results/Channels/RCS/Channel_Rcs1.csv\"\n# Height_PATH = r\"./Results/Channels/Height/Channel_Height1.csv\"\n\n# rcs_channel = pd.read_csv(RCS_PATH,header=None,index_col=0)\n# h_channel = pd.read_csv(Height_PATH,header=None)\n\n# rcs = np.array(rcs_channel[1][:])\n# h = np.array(h_channel)\n\n# # print(h)\n# # print(rcs)\n\n# print('Inverting the lidar equation with the Klett method')\n# RM = Klett()\n\n# print('### computing the extinction coefficient alpha')\n# alpha = RM.alpha_z(rcs,h)\n# print(alpha)","sub_path":"Lidar_main/Aerosol.py","file_name":"Aerosol.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"487806540","text":"#Tuple 
Create\na=()\nb=1,2,3,\"tolga\"\nc=(\"t\",)\nd=(1,2,3,\"tolga\")\ne=('Tolga',(\"ogrt\",\"prg\"))\nf=tuple(\"tolga\")\n\nprint(a,b,c,d,e,f,sep=\"\\n\")\n\nprint(e.index((\"ogrt\",\"prg\")))\n\nprint((1,2)+(3,4))\n\nprint(sorted((3,1,2)))\n\nk=(\"tolga\",\"gülcan\")\n\na,b=k\n\nprint(a,b)\nad,soyad={\"ad\":\"tolga\",\"soyad\":\"gülcan\"}.values()\nprint(ad,soyad)\n\n","sub_path":"chapter9.py","file_name":"chapter9.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"474269907","text":"from __future__ import unicode_literals\n\nimport datetime\nfrom django.conf import settings\nfrom django.utils import timezone\nimport operator\nfrom wtforms import fields, widgets\nfrom wtforms.compat import string_types\nfrom wtforms.validators import ValidationError\n\n__all__ = (\"ModelSelectField\", \"QuerySetSelectField\", \"DateTimeField\")\n\n\nclass QuerySetSelectField(fields.SelectFieldBase):\n \"\"\"A :class:`~wtforms.fields.SelectField` with the choices\n populated from the results of a Django\n :class:`~django.db.models.query.QuerySet`.\n\n .. code-block:: python\n\n category = QuerySetSelectField(queryset=Category.objects.all())\n\n The values passed in the request are the primary keys, but the\n ``data`` attribute of the field will be the actual model instance.\n\n By default each option displays the result of ``str(instance)``, but\n this can be customized by passing the ``get_label`` argument.\n\n To customize the query based on the request, you can set the\n ``queryset`` attribute after creating the form instance. For\n example, to limit the values based on the logged in user:\n\n .. code-block:: python\n\n class ArticleEdit(Form):\n title = StringField()\n series = QuerySetSelectField(allow_blank=True)\n\n def edit_article(request, id):\n article = Article.objects.get(pk=id)\n form = ArticleEdit(obj=article)\n form.series.queryset = Series.objects.filter(author=request.user)\n ...\n\n :param queryset: Populate the select with results from this query.\n :param get_label: The name of a model attribute to use to get the\n label for each item. 
Or a callable that takes a model instance\n and returns a label for it.\n :param allow_blank: Add a blank choice to the top of the list.\n Selecting it sets ``data`` to ``None``.\n :param blank_text: The label for the blank choice.\n \"\"\"\n\n widget = widgets.Select()\n\n def __init__(\n self,\n label=None,\n validators=None,\n queryset=None,\n get_label=None,\n allow_blank=False,\n blank_text=\"\",\n **kwargs\n ):\n super(QuerySetSelectField, self).__init__(label, validators, **kwargs)\n self.allow_blank = allow_blank\n self.blank_text = blank_text\n self._set_data(None)\n if queryset is not None:\n self.queryset = queryset.all() # Make sure the queryset is fresh\n\n if get_label is None:\n self.get_label = lambda x: x\n elif isinstance(get_label, string_types):\n self.get_label = operator.attrgetter(get_label)\n else:\n self.get_label = get_label\n\n def _get_data(self):\n if self._formdata is not None:\n for obj in self.queryset:\n if obj.pk == self._formdata:\n self._set_data(obj)\n break\n return self._data\n\n def _set_data(self, data):\n self._data = data\n self._formdata = None\n\n data = property(_get_data, _set_data)\n\n def iter_choices(self):\n if self.allow_blank:\n yield (\"__None\", self.blank_text, self.data is None)\n\n for obj in self.queryset:\n yield (obj.pk, self.get_label(obj), obj == self.data)\n\n def process_formdata(self, valuelist):\n if valuelist:\n if valuelist[0] == \"__None\":\n self.data = None\n else:\n self._data = None\n self._formdata = int(valuelist[0])\n\n def pre_validate(self, form):\n if not self.allow_blank or self.data is not None:\n for obj in self.queryset:\n if self.data == obj:\n break\n else:\n raise ValidationError(self.gettext(\"Not a valid choice\"))\n\n\nclass ModelSelectField(QuerySetSelectField):\n \"\"\"Like a :class:`QuerySetSelectField`, except takes a model class\n to query all of its objects instead of a specific query.\n\n .. 
code-block:: python\n\n category = ModelSelectField(model=Category)\n\n :param: model: The model to query.\n \"\"\"\n\n def __init__(self, label=None, validators=None, model=None, **kwargs):\n super(ModelSelectField, self).__init__(\n label, validators, queryset=model._default_manager.all(), **kwargs\n )\n\n\nclass DateTimeField(fields.DateTimeField):\n \"\"\"A :class:`~wtforms.fields.DateTimeField` with support for\n Django's timezone utilities.\n \"\"\"\n\n def process_formdata(self, valuelist):\n super(DateTimeField, self).process_formdata(valuelist)\n date = self.data\n if settings.USE_TZ and date is not None and timezone.is_naive(date):\n current_timezone = timezone.get_current_timezone()\n self.data = timezone.make_aware(date, current_timezone)\n\n def _value(self):\n date = self.data\n if (\n settings.USE_TZ\n and isinstance(date, datetime.datetime)\n and timezone.is_aware(date)\n ):\n self.data = timezone.localtime(date)\n return super(DateTimeField, self)._value()\n","sub_path":"src/wtforms_django/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"566669623","text":"#!/usr/bin/env python\n\n# Copyright 2017 Vertex.AI\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nfrom six import exec_\n\nimport argparse\nimport errno\nimport json\nimport numpy as np\nimport os\nimport sys\nimport time\nimport random\n\n\nclass StopWatch(object):\n def __init__(self, use_callgrind):\n self.__start = None\n self.__stop = None\n self.__use_callgrind = use_callgrind\n self.__callgrind_active = False\n self.__total = 0.0\n\n def start_outer(self):\n # Like start(), but does not turn on callgrind.\n self.__start = time.time()\n\n def start(self):\n self.__start = time.time()\n if self.__use_callgrind:\n os.system('callgrind_control --instr=on %d' % (os.getpid(),))\n self.__callgrind_active = True\n\n def stop(self):\n if self.__start is not None:\n stop = time.time()\n self.__total += stop - self.__start\n self.__start = None\n if self.__callgrind_active:\n self.__callgrind_active = False\n os.system('callgrind_control --instr=off %d' % (os.getpid(),))\n\n def elapsed(self):\n return self.__total\n\n\nclass Output(object):\n def __init__(self):\n self.contents = None\n self.precision = 'untested'\n\n\ndef has_plaid():\n try:\n import plaidml.keras\n return True\n except ImportError:\n return False\n\nSUPPORTED_NETWORKS = ['inception_v3', 'mobilenet', 'resnet50', 'vgg16', 'vgg19', 'xception']\n\ndef main():\n exit_status = 0\n parser = argparse.ArgumentParser()\n plaidargs = parser.add_mutually_exclusive_group()\n plaidargs.add_argument('--plaid', action='store_true')\n plaidargs.add_argument('--no-plaid', action='store_true')\n parser.add_argument('--fp16', action='store_true')\n parser.add_argument('-v', '--verbose', type=int, nargs='?', const=3)\n parser.add_argument('--result', default='/tmp/plaidbench_results')\n 
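# Note: --callgrind only has an effect when plaidbench itself runs under\n # valgrind; StopWatch then toggles instrumentation via callgrind_control\n # around the timed regions.\n 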
parser.add_argument('--callgrind', action='store_true')\n parser.add_argument('-n', '--examples', type=int, default=1024)\n parser.add_argument('--epochs', type=int, default=8)\n parser.add_argument('--batch-size', type=int, default=1)\n parser.add_argument('--train', action='store_true')\n parser.add_argument('--print-stacktraces', action='store_true')\n parser.add_argument('module', choices=SUPPORTED_NETWORKS)\n args = parser.parse_args()\n\n if args.plaid or (not args.no_plaid and has_plaid()):\n print('Using PlaidML backend.')\n import plaidml.keras\n if args.verbose:\n plaidml._internal_set_vlog(args.verbose)\n plaidml.keras.install_backend()\n if args.fp16:\n from keras.backend.common import set_floatx\n set_floatx('float16')\n\n batch_size = int(args.batch_size)\n epochs = args.epochs\n examples = args.examples\n epoch_size = examples // epochs\n\n if epochs > examples:\n raise ValueError('The number of epochs must not exceed the number of examples.')\n if batch_size > epoch_size:\n raise ValueError('The batch size must not exceed the number of examples per epoch.')\n if examples%epochs != 0:\n raise ValueError('The number of examples must be divisible by the number of epochs.')\n if epoch_size%batch_size != 0:\n raise ValueError('The number of examples per epoch is not divisible by the batch size.')\n\n if args.train:\n # Load the dataset and scrap everything but the training images\n # cifar10 data is too small, but we can upscale\n from keras.datasets import cifar10\n print('Loading the data')\n (x_train, y_train_cats), (x_test, y_test_cats) = cifar10.load_data()\n from keras.utils.np_utils import to_categorical\n x_train = x_train[:epoch_size]\n y_train_cats = y_train_cats[:epoch_size]\n y_train = to_categorical(y_train_cats, num_classes=1000)\n else:\n this_dir = os.path.dirname(os.path.abspath(__file__))\n cifar_path = os.path.join(this_dir, 'cifar16.npy')\n x_train = np.load(cifar_path).repeat(1 + batch_size//16, axis=0)[:batch_size]\n y_train_cats = None\n\n stop_watch = StopWatch(args.callgrind)\n compile_stop_watch = StopWatch(args.callgrind)\n output = Output()\n data = {\n 'example': args.module\n }\n stop_watch.start_outer()\n compile_stop_watch.start_outer()\n try:\n this_dir = os.path.dirname(os.path.abspath(__file__))\n module = os.path.join(this_dir, 'networks', '%s.py' % args.module)\n globals = {}\n exec_(open(module).read(), globals)\n\n x_train = globals['scale_dataset'](x_train)\n\n model = globals['build_model']()\n print(\"\\nModel loaded.\")\n\n # Prep the model and run an initial un-timed batch\n print(\"Compiling and running initial batch, batch_size={}\".format(batch_size))\n compile_stop_watch.start()\n optimizer = 'sgd'\n if args.module[:3] == 'vgg':\n from keras.optimizers import SGD\n optimizer = SGD(lr=0.0001)\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n if args.train:\n # training\n x = x_train[:epoch_size]\n y = y_train[:epoch_size]\n model.train_on_batch(x_train[0:batch_size], y_train[0:batch_size])\n compile_stop_watch.stop()\n for i in range(args.epochs):\n if i == 1:\n print('Doing the main timing')\n stop_watch.start()\n history = model.fit(x=x, y=y, batch_size=batch_size, epochs=1, shuffle=False, initial_epoch=0)\n stop_watch.stop()\n time.sleep(.025 * random.random())\n if i == 0:\n output.contents = [history.history['loss']]\n output.contents = np.array(output.contents)\n else:\n # inference\n y = model.predict(x=x_train, batch_size=batch_size)\n compile_stop_watch.stop()\n 
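# Keep the initial prediction batch; it is written to result.npy at the end\n # so outputs can be compared across runs (precision stays 'untested' here).\n 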
output.contents = y\n print('Warmup')\n \n for i in range(32//batch_size + 1):\n y = model.predict(x=x_train, batch_size=batch_size)\n # Now start the clock and run 100 batches\n print('Doing the main timing')\n\n for i in range(examples//batch_size):\n stop_watch.start()\n y = model.predict(x=x_train, batch_size=batch_size)\n stop_watch.stop()\n time.sleep(.025 * random.random())\n\n stop_watch.stop()\n compile_stop_watch.stop()\n execution_duration = stop_watch.elapsed()\n compile_duration = compile_stop_watch.elapsed()\n data['execution_duration'] = execution_duration\n data['compile_duration'] = compile_duration\n print('Example finished, elapsed: {} (compile), {} (execution)'.format(compile_duration, execution_duration))\n data['precision'] = output.precision\n except Exception as ex:\n print(ex)\n data['exception'] = str(ex)\n exit_status = -1\n if args.print_stacktraces:\n raise\n print('Set --print-stacktraces to see the entire traceback')\n finally:\n try:\n os.makedirs(args.result)\n except OSError as ex:\n if ex.errno != errno.EEXIST:\n print(ex)\n return\n with open(os.path.join(args.result, 'result.json'), 'w') as out:\n json.dump(data, out)\n if isinstance(output.contents, np.ndarray):\n np.save(os.path.join(args.result, 'result.npy'), output.contents)\n sys.exit(exit_status)\n\nif __name__ == '__main__':\n main()\n","sub_path":"plaidbench.py","file_name":"plaidbench.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"603217576","text":"import sqlite3\r\nconnection = sqlite3.connect(\"college.db\")\r\n\r\ncursor = connection.cursor()\r\n\r\nsql_command = ''' \r\nCREATE TABLE attendance(\r\nrollno INTEGER,\r\nfname VARCHAR(20),\r\nlname VARCHAR(30),\r\nday DATE,\r\nstatus CHAR(1) ); '''\r\n\r\ncursor.execute(sql_command)\r\n\r\n","sub_path":"attendance.py","file_name":"attendance.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"224396599","text":"import csv\nimport os\nimport sys\nimport quaternion\nimport argparse\nimport struct\nimport numpy as np\nimport pandas as pd\nimport time\nimport utils\nfrom shutil import copyfile\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom tqdm import tqdm\n\n# python filter_panos.py --coords_file \"../poses/coords.csv\" --output_file \"data/run_1/processed/pos_ang\" --pano_path \"/Users/martinweiss/code/academic/hyrule-gym/data/data/run_1/panos/\"\n\n\nparser = argparse.ArgumentParser(description='Filter some coords.')\nparser.add_argument('--coords_file', type=str, help='a file containing the coords')\nparser.add_argument('--output_file', type=str, help='a file where we write the \"pos_ang\" numpy array')\nparser.add_argument('--pano_src', type=str, default=\"/Volumes/Martin Weiss\\'s External Drive/corl/panos\", help='source location for panos')\nparser.add_argument('--pano_dst', type=str, default=\"/Users/martinweiss/code/academic/hyrule-gym/data/data/corl/panos/\", help='dest location for panos')\nargs = parser.parse_args()\n\n\ndef filter_poses(poses):\n to_filter = set()\n for i, node1 in tqdm(poses.iterrows(), leave=False, total=poses.shape[0], desc=\"filtering nodes\"):\n for j, node2 in utils.find_nearby_nodes(poses, node1, 0.5).iterrows():\n if i == j or j in to_filter or i in to_filter:\n continue\n to_filter.add(j)\n return poses[~poses.index.isin(to_filter)]\n\n\ndef save_poses(filename, poses):\n 
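# poses arrives here as a pandas DataFrame; np.save stores the raw .values\n # array and appends '.npy' if the name lacks it. A hedged reload sketch:\n # pd.DataFrame(np.load(filename + '.npy'), columns=['index', 'timestamp', 'x', 'y', 'z', 'angle'])\n 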
np.save(filename, poses.values)\n\n\n# Filter the poses\nposes = pd.read_csv(args.coords_file, delimiter=\",\")\nposes.columns = ['index', 'timestamp', 'x', 'y', 'z', 'angle']\nROT_MAT = np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])\nfiltered_poses = filter_poses(poses)\nsave_poses(args.output_file, filtered_poses)\nprint(\"num poses filtered: \" + str(len(poses) - len(filtered_poses)))\nprint(\"num poses remaining: \" + str(len(filtered_poses)))\n\n# Write an image of the filtered poses\nx_f = [pose.x for idx, pose in filtered_poses.iterrows()]\ny_f = [pose.y for idx, pose in filtered_poses.iterrows()]\nplt.scatter(x_f, y_f)\nplt.savefig(\"filtered_poses.png\")\n\n# Copy the selected panos from the HD\npaths = []\nfor d in os.listdir(args.pano_src):\n path = args.pano_src + \"/\" + d\n for f in os.listdir(path):\n paths.append(path + \"/\" + f)\n\ngood_paths = []\nnums = [str(int(x.timestamp * 30)).zfill(6) for idx, x in filtered_poses.iterrows()]\nfor path in paths:\n for num in nums:\n if num in path:\n good_paths.append(path)\n\n# os.mkdir(args.pano_dst)\nfor path in tqdm(good_paths, total=len(good_paths)):\n copyfile(path, args.pano_dst + path.split(\"/\")[-1])\n","sub_path":"data/scripts/filter_panos.py","file_name":"filter_panos.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"145470208","text":"#import socket\n\n#from socket import *\n\nimport socket\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #create datagram socket\ns.bind((\"\", 10001)) #bind the socket to port 10001\n#s.settimeout(20)\n\nwhile True:\n data, addr = s.recvfrom(16) #wait for a message\n resp = b\"Welcome to Python Networking!!!\"\n s.sendto(resp.upper(), addr) #send response\n\n#UDP uses datagram sockets at the transport layer interface\n#unlike TCP there is no accept()/fork(): no connection is established\n#the socket only sends and receives individual packets\n\n#Terminal 1 :\n# cisco@cisco-ThinkPad-T430:~/PycharmProjects/training/day_eight$ python3 udp_server.py\n\n#Terminal 2:\n# cisco@cisco-ThinkPad-T430:~/PycharmProjects/training/day_eight$ python3 socket_udp_client_echo.py\n# b'WELCOME TO PYTHO'\n","sub_path":"PycharmProjects/training/day_eight/udp_server.py","file_name":"udp_server.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"575012488","text":"from .. import names
\nfrom ..ndtypes import ArrayT, lower_rank\nfrom ..syntax import AllPairs, Map, Assign, Return, TypedFn, Var\nfrom transform import Transform\n\nclass MapifyAllPairs(Transform):\n def transform_stmt(self, stmt):\n \"\"\"\n Assume that all adverbs occur only at the outer level of bindings, so skip\n recursive evaluation of expressions\n \"\"\"\n\n if stmt.__class__ is Assign and stmt.rhs.__class__ is AllPairs:\n stmt.rhs = self.transform_AllPairs(stmt.rhs)\n elif stmt.__class__ is Return and stmt.value.__class__ is AllPairs:\n stmt.value = self.transform_AllPairs(stmt.value)\n return stmt\n\n def transform_AllPairs(self, expr):\n \"\"\"\n Transform each AllPairs(f, X, Y) operation into a pair of nested maps:\n def g(x_elt):\n def h(y_elt):\n return f(x_elt, y_elt)\n return map(h, Y)\n map(g, X)\n \"\"\"\n\n if expr.out is not None:\n return expr\n\n # if the adverb function is a closure, give me all the values it\n # closes over\n closure_elts = self.closure_elts(expr.fn)\n n_closure_elts = len(closure_elts)\n\n # strip off the closure wrappings and give me the underlying TypedFn\n fn = self.get_fn(expr.fn)\n\n # the two array arguments to this AllPairs adverb\n x, y_outer = expr.args\n\n x_elt_name = names.fresh('x_elt')\n x_elt_t = fn.input_types[n_closure_elts]\n x_elt_var = Var(x_elt_name, type = x_elt_t)\n y_inner_name = names.fresh('y')\n y_inner = Var(y_inner_name, type = y_outer.type)\n\n inner_closure_args = []\n for (i, elt) in enumerate(closure_elts):\n t = elt.type\n if elt.__class__ is Var:\n name = names.refresh(elt.name)\n else:\n name = names.fresh('closure_arg%d' % i)\n inner_closure_args.append(Var(name, type = t))\n\n inner_arg_names = []\n inner_input_types = []\n type_env = {}\n\n for var in inner_closure_args + [y_inner, x_elt_var]:\n type_env[var.name] = var.type\n inner_arg_names.append(var.name)\n inner_input_types.append(var.type)\n\n inner_closure_rhs = self.closure(fn, inner_closure_args + [x_elt_var])\n\n inner_result_t = lower_rank(expr.type, 1)\n inner_fn = TypedFn(\n name = names.fresh('allpairs_into_maps_wrapper'),\n arg_names = tuple(inner_arg_names),\n input_types = tuple(inner_input_types),\n return_type = inner_result_t,\n type_env = type_env,\n body = [\n Return(Map(inner_closure_rhs,\n args=[y_inner],\n axis = expr.axis,\n type = inner_result_t))\n ]\n )\n closure = self.closure(inner_fn, closure_elts + [y_outer])\n return Map(closure, [x], axis = expr.axis, type = expr.type)\n","sub_path":"parakeet/old/mapify_allpairs.py","file_name":"mapify_allpairs.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"160476102","text":"from picamera import PiCamera\nfrom time import sleep\nfrom datetime import datetime\n\n#To do:\n# print altitude, time, etc. onto the picture at bottom right corner.
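\n# (Untested sketch for this To-do: picamera exposes an annotate_text attribute,\n# e.g. self.piCam.annotate_text = str(datetime.now()) set just before capture();\n# the altitude value would have to be passed in by the caller.)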
\n\nclass Camera:\n def __init__(self, video_path, image_path):\n self.piCam = PiCamera()\n self.video_path\n self.image_path\n self.piCam.framerate = 30\n self.piCam.brightness = 50\n self.piCam.contrast = 50\n \n def takePicture(self, x=2592, y=1944):\n # take picture\n # self.piCam.start_preview() # default res\n self.piCam.resolution = (x, y)\n now = datetime.now().strftime(\"%d-%m-%Y_%H:%M:%S\")\n self.piCam.capture(self.image_path + now +'.jpg')\n # self.piCam.stop_preview()\n sleep(2)\n \n def takeVideo(self, x=640, y=480):\n # take a video\n # self.piCam.start_preview() # default res\n self.piCam.resolution = (x, y)\n now = datetime.now().strftime(\"%d-%m-%Y_%H:%M:%S\")\n self.piCam.start_recording(self.video_path + now +'.h264')\n self.piCam.wait_recording(10)\n self.piCam.stop_recording()\n # self.piCam.stop_preview()\n sleep(2)\n \n def close(self):\n self.piCam.close()","sub_path":"SRC/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"465407769","text":"import re\n\nMEMORY_UNITS = {\"K\": 1024, \"M\": 1024 ** 2, \"G\": 1024 ** 3, \"T\": 1024 ** 4, \"P\": 1024 ** 5, \"E\": 1024 ** 6}\n\n\ndef normalize_cpu(value: str) -> float:\n try:\n x = float(value)\n except ValueError:\n x = int(re.sub(r\"milli|m\", \"\", value))\n x = x / 1000\n return float(\"{:.2f}\".format(x))\n\n\ndef normalize_memory_to_bytes(value: str) -> float:\n unit = re.sub(\"[0-9]\", \"\", value)\n mul = MEMORY_UNITS.get(unit[0], 1)\n\n v = float(re.sub(\"[^0-9]\", \"\", value))\n _bytes = v * mul\n return _bytes\n","sub_path":"caboto/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"434786207","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_selenium import SeleniumRequest\n\n\nclass SlickspiderSpider(scrapy.Spider):\n name = 'slickspider'\n\n def start_requests(self):\n yield SeleniumRequest(\n url=\"https://slickdeals.net/computer-deals\",\n wait_time=2,\n callback=self.parse\n )\n\n def parse(self, response):\n products = response.xpath(\"//li[@class='fpGridBox grid altDeal hasPrice']\")\n for product in products:\n name = product.xpath(\".//a[contains(@class,'itemTitle')]/text()\").get()\n link = product.xpath(\".//a[@class='itemTitle bp-p-dealLink bp-c-link']/@href\").get()\n # yield {\n # 'Name': name,\n # 'User-Agent': response.request.headers['User-Agent']\n # }\n absolute = f\"https://slickdeals.net{link}\"\n yield SeleniumRequest(\n url=absolute,\n wait_time=2,\n callback=self.product_comment,\n meta={'Name': name}\n )\n\n # next_page = response.xpath(\"//a[@data-role='next-page']/@href\").get()\n # if next_page:\n # abs_url = f\"https://slickdeals.net{next_page}\"\n # print(abs_url)\n # print(\"***********************\")\n # yield SeleniumRequest(url=abs_url, wait_time=2, callback=self.parse, headers= {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36'})\n\n def product_comment(self, response):\n name = response.meta['Name']\n comments = response.xpath(\"//div[@id='commentsBox']//span/text()\").get()\n yield {\n 'Name': name,\n 'Comment': comments\n }\n\n print('''\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n .\n\n\n 
","sub_path":"seleniumscripts.py","file_name":"seleniumscripts.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"270266975","text":"from rest_framework import viewsets, mixins\nimport requests\n\nfrom app.constants import GET_GYM_ID_SEARCH_URL, GET_DETAILS_API_KEY, GET_GYM_DETAILS_SEARCH_URL, \\\n GET_GYM_PHOTO_SEARCH_URL\nfrom core.gym.model import Gym\nfrom core.gym_details.model import GymDetails\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\n\nclass GymDetailsViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n queryset = GymDetails.objects.all()\n serializer_class = []\n\n @action(detail=False, methods=['put'], url_path='update_photos')\n def update_photos(self, request, *args):\n gym_details_api_id = request.data.get('place_id')\n photos = request.data.get('photos')\n\n gym_details_object = GymDetails.objects.get(gym_details_api_id=gym_details_api_id)\n gym_details_object.data[0]['photos'] = photos\n gym_details_object.save()\n\n return Response(gym_details_object.data)\n\n def list(self, request, *args, **kwargs):\n gym_search_query = request.query_params.get('gym_search_query')\n gym_id = request.query_params.get('gym_id')\n\n gym_details = self.process_gym_details_request(gym_search_query, gym_id)\n\n return Response(gym_details)\n\n def process_gym_details_request(self, gym_search_query, gym_id):\n gym_object = Gym.objects.get(id=gym_id)\n gym_has_details = hasattr(gym_object, 'gymdetails')\n\n if gym_has_details:\n gym_details = gym_object.gymdetails.data\n gym_details[0][\"place_id\"] = gym_object.gymdetails.gym_details_api_id\n return gym_details\n else:\n gym_details_api_id = self.get_gym_details_api_id(gym_search_query)\n\n gym_details = self.get_gym_details(gym_details_api_id)\n gym_details[\"place_id\"] = gym_details_api_id\n\n self.create_details_data(gym_object, gym_details, gym_details_api_id)\n\n # gym_details is a dictionary in a list when returned from the database.\n # gym_details is a dictionary when it is fetched from the API.\n # To maintain a contract with the frontend, both should be returned as a dictionary in a list\n return [gym_details]\n\n def create_details_data(self, gym_object, gym_details, gym_details_api_id):\n gym_details_object = GymDetails(gym=gym_object, gym_details_api_id=gym_details_api_id, data=[gym_details])\n gym_details_object.save()\n\n def get_gym_details(self, gym_details_api_id):\n parameters = {\n \"place_id\": gym_details_api_id,\n \"fields\": \"international_phone_number,rating,review,opening_hours\",\n \"key\": GET_DETAILS_API_KEY\n }\n\n url = GET_GYM_DETAILS_SEARCH_URL\n r = requests.get(url=url, params=parameters)\n\n if r.status_code != 200:\n raise Exception(\"failed to locate gym\")\n\n data = r.json()\n gym_details = data.get(\"result\")\n\n return gym_details\n\n def get_gym_details_api_id(self, gym_search_query):\n parameters = {\n \"input\": gym_search_query,\n \"inputtype\": \"textquery\",\n \"fields\": \"place_id\",\n \"key\": GET_DETAILS_API_KEY\n }\n\n url = GET_GYM_ID_SEARCH_URL\n r = requests.get(url=url, params=parameters)\n\n if r.status_code != 200:\n raise Exception(\"failed to locate gym\")\n\n data = r.json()\n if len(data) == 0 or len(data.get(\"candidates\")) == 0:\n raise Exception(\"failed to locate gym\")\n\n gym_details_api_id = data.get(\"candidates\")[0].get(\"place_id\")\n\n return gym_details_api_id\n
","sub_path":"back/core/gym_details/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"416453111","text":"import os\r\nimport sys\r\nfrom PySide2.QtCore import *\r\nfrom PySide2.QtWidgets import *\r\nfrom PySide2.QtGui import *\r\nfrom pyside_material import apply_stylesheet\r\n\r\nclass ImagePlayer(QWidget):\r\n def __init__(self, filename):\r\n QWidget.__init__(self, None)\r\n\r\n # Load the file into a QMovie\r\n self.movie = QMovie(filename, QByteArray(), self)\r\n\r\n size = self.movie.scaledSize()\r\n self.setGeometry(0, 0, size.width(), size.height())\r\n\r\n\r\n self.movie_screen = QLabel()\r\n # Make label fit the gif\r\n self.movie_screen.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\r\n self.movie_screen.setAlignment(Qt.AlignCenter)\r\n\r\n # Create the layout\r\n main_layout = QVBoxLayout()\r\n main_layout.addWidget(self.movie_screen)\r\n\r\n self.setLayout(main_layout)\r\n\r\n # Add the QMovie object to the label\r\n self.movie.setCacheMode(QMovie.CacheAll)\r\n self.movie.setSpeed(100)\r\n self.movie.loopCount()\r\n self.movie_screen.setMovie(self.movie)\r\n self.movie.start()\r\n\r\nif __name__ == \"__main__\":\r\n base_dir = os.path.dirname(os.path.abspath(__file__))\r\n path = os.path.join(base_dir)\r\n image_path = path + \"\\images\\docker_gif3.gif\"\r\n gif = image_path\r\n app = QApplication(sys.argv)\r\n player = ImagePlayer(gif)\r\n player.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"GIFWidget.py","file_name":"GIFWidget.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"130643002","text":"import requests\nimport getpass\nimport os, sys, platform\nfrom requests.exceptions import HTTPError\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\n\nclass FindPdf:\n def __init__(self):\n self.target = input(\"\\u001b[32m Hedefi Giriniz (Örnek-> www.google.com): \")\n self.osName = platform.system()\n self.currentUserName = getpass.getuser()\n\n def MakeRequest(self):\n try:\n self.replaceHttps = \"https://\" + self.target\n self.req = requests.get(self.replaceHttps, allow_redirects=True)\n self.soup = BeautifulSoup(self.req.text, \"html.parser\")\n \n if self.req.status_code == 200:\n self.ScrapingPDF()\n\n else:\n print(\"\\u001b[31m Sayfa Yanıt Vermiyor !\")\n sys.exit()\n\n except HTTPError as http_err:\n print(http_err)\n sys.exit()\n\n except Exception as err:\n print(err)\n sys.exit()\n\n def ScrapingPDF(self):\n self.numberOfPdf = 0\n\n for link in self.soup.select(\"a[href$='.pdf']\"):\n self.numberOfPdf += 1\n self.folderLocation = os.getcwd()\n \n if not os.path.exists(self.folderLocation +\"/AllPDF\"):os.mkdir(self.folderLocation+\"/AllPDF\") # create the AllPDF folder if it does not exist yet\n # derive a file name for each pdf from its link\n self.filename = os.path.join(self.folderLocation+\"/AllPDF\",link['href'].split('/')[-1])\n\n with open(self.filename, 'wb') as f:\n f.write(requests.get(urljoin(self.replaceHttps,link['href'])).content)\n print(\"\\u001b[34m PDF Dosyaları Şuraya Kaydedildi -> \\u001b[32m\" + self.folderLocation + \"/AllPDF\")\n\n if self.numberOfPdf == 0:\n print(\"\\u001b[31m Her hangi bir PDF bulunamadı !\")\n\n\n\na = 
FindPdf()\na.MakeRequest()\n","sub_path":"catchpdf.py","file_name":"catchpdf.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"50222742","text":"\"\"\"\nskill laugh\n\nCopyright (C) 2018 JarbasAI\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\nTHE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n\n\"\"\"\n\nfrom mycroft import MycroftSkill, intent_file_handler, intent_handler\nfrom adapt.intent import IntentBuilder\nfrom mycroft.audio import wait_while_speaking, is_speaking\nfrom mycroft.util import play_wav, play_mp3, play_ogg\nfrom os import listdir\nfrom os.path import join, dirname\nimport random\nfrom datetime import timedelta, datetime\n\n\nclass LaughSkill(MycroftSkill):\n def __init__(self):\n MycroftSkill.__init__(self)\n self.random_laugh = False\n self.sounds = {\"male\": [], \"female\": []}\n if \"gender\" not in self.settings:\n self.settings[\"gender\"] = \"male\"\n if \"sounds_dir\" not in self.settings:\n self.settings[\"sounds_dir\"] = join(dirname(__file__), \"sounds\")\n self.p = None\n self.settings_change_callback = self._fix_gender\n\n def _fix_gender(self):\n if \"f\" in self.settings[\"gender\"].lower():\n self.settings[\"gender\"] = \"female\"\n elif \"m\" in self.settings[\"gender\"].lower():\n self.settings[\"gender\"] = \"male\"\n else:\n self.settings[\"gender\"] = \"robot\"\n\n def initialize(self):\n sounds_dir = join(self.settings[\"sounds_dir\"], \"male\")\n self.sounds[\"male\"] = [join(sounds_dir, sound) for sound in\n listdir(sounds_dir) if\n \".wav\" in sound or \".mp3\" in\n sound]\n sounds_dir = join(self.settings[\"sounds_dir\"], \"female\")\n self.sounds[\"female\"] = [join(sounds_dir, sound) for sound in\n listdir(sounds_dir) if\n \".wav\" in sound or \".mp3\" in sound]\n sounds_dir = join(self.settings[\"sounds_dir\"], \"robot\")\n self.sounds[\"robot\"] = [join(sounds_dir, sound) for sound in\n listdir(sounds_dir) if\n \".wav\" in sound or \".mp3\" in sound]\n # stop laughs for speech execution\n self.add_event(\"speak\", self.stop_laugh)\n\n def laugh(self):\n # dont laugh over a speech message\n if is_speaking():\n wait_while_speaking()\n\n sound = random.choice(self.sounds[self.settings[\"gender\"]])\n if \".mp3\" in sound:\n self.p = play_mp3(sound)\n elif \".ogg\" in sound:\n self.p = play_ogg(sound)\n else:\n self.p = play_wav(sound)\n\n @intent_file_handler(\"Laugh.intent\")\n def handle_laugh_intent(self, message):\n self.laugh()\n\n @intent_file_handler(\"RandomLaugh.intent\")\n def handle_random_intent(self, 
message):\n # initiate random laughing\n self.log.info(\"Laughing skill: Triggering random laughing\")\n self.random_laugh = True\n self.handle_laugh_event(message)\n\n @intent_handler(\n IntentBuilder('StopLaughing').require('Stop').require('Laugh'))\n def halt_laughing(self, message):\n self.log.info(\"Laughing skill: Stopping\")\n # if in random laugh mode, cancel the scheduled event\n if self.random_laugh:\n self.log.info(\"Laughing skill: Stopping random laugh event\")\n self.random_laugh = False\n self.cancel_scheduled_event('random_laugh')\n self.speak_dialog(\"cancel\")\n else:\n self.speak_dialog(\"cancel_fail\")\n\n def handle_laugh_event(self, message):\n # create a scheduled event to laugh at a random interval between 1\n # minute and half an hour\n if not self.random_laugh:\n return\n self.log.info(\"Laughing skill: Handling laugh event\")\n self.laugh()\n self.cancel_scheduled_event('random_laugh')\n self.schedule_event(self.handle_laugh_event,\n datetime.now() + timedelta(\n seconds=random.randrange(60, 1800)),\n name='random_laugh')\n\n def stop_laugh(self):\n if self.p is not None:\n self.p.terminate()\n return True\n return False\n\n def stop(self):\n # abort current laugh\n stopped = self.stop_laugh()\n # stop random laughs\n if self.random_laugh:\n self.halt_laughing(None)\n stopped = True\n return stopped\n\n\ndef create_skill():\n return LaughSkill()\n","sub_path":"laugh/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"79964139","text":"\"\"\"\nContains the 'ClustarData' class, which is responsible for executing the\nentire project pipeline for detecting groups in a single FITS image; this \nclass also collects and stores all relevant data, statistics, and variables in\nthis pipeline.\n\nVisit for additional information.\n\"\"\"\n\n\nfrom clustar import denoise, group, graph, fit\nimport astropy.io.fits\nimport numpy as np\n\n\nclass ClustarData(object):\n \"\"\"\n A class for executing the entire pipeline for detecting groups in a FITS \n image and for storing all relevant data associated with each group.\n \n Attributes\n ----------\n path : str\n Path to FITS file.\n \n image : Image\n Internal class for storing FITS image variables.\n \n params : Params\n Internal class for specifying the ClustarData parameters.\n \n groups : list\n List of 'Group' objects extracted from the given FITS image.\n \n flag : bool\n True if any detected group in the FITS image is flagged for manual \n review, otherwise false.\n \n Methods\n -------\n update(**kwargs)\n Updates 'Params' object with the specified arguments and executes the\n entire pipeline.\n \n reset(**kwargs)\n Resets 'Params' object to the default values, then updates 'Params' \n object with the specified arguments and executes the entire pipeline.\n \n identify(vmin=None, vmax=None, show=True, dpi=180)\n Displays the FITS image and identifies the groups in green, orange, \n or red rectangles, which are defined as:\n 1. 'Green' denotes that the group is not flagged for manual review.\n 2. 'Orange' denotes that the group is not flagged for manual review, \n but the group is smaller than the beam size.\n 3. 'Red' denotes that the group is flagged for manual review.\n Beam size is the white oval shown on the bottom right corner of \n the FITS image.\n \n Examples\n --------\n Create the 'ClustarData' object by specifying the path to the FITS file.
\n >>> cd = ClustarData(path='~/data/example.fits', threshold=0.025)\n \n Visualize the detected groups.\n >>> cd.identify()\n \n Access individual 'Group' objects.\n >>> cd.groups\n \n Notes\n -----\n Visit for additional information.\n \"\"\"\n\n class Image(object):\n \"\"\"\n An internal class for storing FITS image variables.\n \n Attributes\n ----------\n clean : ndarray\n Data from the FITS image after denoising process.\n \n x : ndarray\n Index values of the 'x' position from the data.\n \n y : ndarray\n Index values of the 'y' position from the data.\n \n pos : ndarray\n Index values of the data, given as (x, y).\n \n nonzero : ndarray\n Index values of nonzero points in the data.\n \n std : ndarray\n Standard deviation values from each block in the grid composed\n in the denoise process; used to calculate the noise statistic.\n \n rms : ndarray\n Root mean squared values from each block in the grid composed\n in the denoise process; used to calculate the noise statistic.\n \n noise : float\n Noise statistic generated for the denoise process; values less \n than \"noise\" times \"sigma\" are set to zero.\n \n major : float\n Length of the major axis for the beam.\n \n minor : float\n Length of the minor axis for the beam.\n \n degrees : float\n Degrees of rotation for the beam.\n \n area : float\n Number of points inside the beam; used to identify groups smaller\n than the beam size.\n \"\"\"\n\n def __init__(self, data, header):\n \"\"\"\n Parameters\n ----------\n data : ndarray\n Raw data from the FITS image; must be 2-D.\n\n header : dict\n Header dictionary stored in FITS file.\n \n Raises\n ------\n KeyError\n If the following keys are missing from the FITS header: \n 'BMAJ', 'BMIN', 'BPA', 'CDELT1', 'CDELT2', and 'OBJECT'.\n \"\"\"\n self.data = data\n self.header = header\n self._setup()\n\n def _setup(self):\n x = range(self.data.shape[1])\n y = range(self.data.shape[0])\n self.x, self.y = np.meshgrid(x, y)\n self.pos = np.dstack((self.x, self.y))\n header = dict(self.header)\n keys = ['BMAJ', 'BMIN', 'BPA', 'CDELT1', 'CDELT2', 'OBJECT']\n for key in keys:\n if key not in header.keys():\n raise KeyError(\"FITS header is missing the \" +\n f\"keyword '{key}'; double check \" +\n \"the file type specification.\")\n # Specify beam parameters.\n self.major = header['BMAJ']/abs(header['CDELT1'])\n self.minor = header['BMIN']/abs(header['CDELT2'])\n self.degrees = header['BPA']\n self.area = np.pi * self.major/2 * self.minor/2\n\n class Group(object):\n \"\"\"\n An internal class for storing variables associated to a detection.\n \n Attributes\n ----------\n image : _Image\n Internal subclass for storing image variables.\n \n res : _Res\n Internal subclass for storing residual variables.\n \n fit : _Fit\n Internal subclass for storing fit variables.\n \n stats : _Stats\n Internal subclass for storing statistics.\n \n metrics : _Metrics\n Internal subclass for storing the evaluated metrics.\n \n flag : bool\n Determines whether this group is marked for manual review.\n \"\"\"\n\n class _Image(object):\n \"\"\"\n An internal subclass for storing image variables associated to a\n detection.\n \n Attributes\n ----------\n data : ndarray\n Subset of raw data from the FITS image identifying the group.\n \n clean : ndarray\n Data of the group after the denoising process.\n \n x : ndarray\n Index values of the 'x' position from the group data.\n \n y : ndarray\n Index values of the 'y' position from the group data.\n \n pos : ndarray\n Index values of the group data, given as (x, y).\n 
\n nonzero : ndarray\n Index values of nonzero points in the group data.\n \n ref : list\n List containing the minimum row value and minimum column value\n of the group data.\n \n limit : list\n List containing the maximum row value and maximum column value\n of the overall FITS image.\n \"\"\"\n\n def __init__(self, bounds):\n \"\"\"\n Parameters\n ----------\n bounds : list\n List of four integers corresponding to minimum row value, \n maximum row value, minimum column value, and maximum column\n value in this order.\n \"\"\"\n self.bounds = bounds\n self.data = None\n self.clean = None\n self.x = None\n self.y = None\n self.ref = None\n self.limit = None\n self.pos = None\n self.nonzero = None\n\n class _Residuals(object):\n \"\"\"\n An internal subclass for storing residual variables associated to\n a detection.\n \n Attributes\n ----------\n data : ndarray\n Residuals computed in the fitting process. Precisely, they\n are [1 - (\"bivariate Gaussian model\" / \"group data\")].\n \n clean : ndarray\n Residuals computed in the fitting process, where points \n outside of the ellipse are set to zero.\n \n pos : ndarray\n Index values of the residual data, given as (x, y).\n \n inside : ndarray\n Subset of index values that lie inside of the ellipse.\n \n outside : ndarray\n Subset of index values that lie outside of the ellipse.\n \n output : array_like\n List of residuals that lie inside of the ellipse; the result \n of the evaluation metric that is computed on this list is \n compared to the specified threshold; this determines which\n groups are flagged for manual review.\n \"\"\"\n\n def __init__(self):\n self.data = None\n self.clean = None\n self.pos = None\n self.inside = None\n self.outside = None\n self.output = None\n\n class _Fit(object):\n \"\"\"\n An internal subclass for storing fit variables associated to a \n detection.\n \n Attributes\n ----------\n rv : multivariate_normal_frozen\n Frozen multivariate normal distribution generated from the \n group statistics.\n \n bvg : ndarray\n Results of the multivariate normal probability density \n function evaluated at the points specified by the group data.\n \n ellipse : Polygon\n Polygon object containing the points that generate an ellipse\n corresponding to the multivariate normal distribution.\n \n major_peaks : int\n Number of local maxima along the major axis of the ellipse.\n \n minor_peaks : int\n Number of local maxima along the minor axis of the ellipse.\n \"\"\"\n\n def __init__(self):\n self.rv = None\n self.bvg = None\n self.ellipse = None\n self.major_peaks = None\n self.minor_peaks = None\n\n class _Stats(object):\n \"\"\"\n An internal subclass for storing statistics associated to a \n detection.\n \n Attributes\n ----------\n x_bar : float\n Average of index values in the 'x' position weighted by the\n corresponding group data.\n \n y_bar : float\n Average of index values in the 'y' position weighted by the \n corresponding group data.\n \n x_var : float\n Variance of index values in the 'x' position weighted by the \n corresponding group data.\n \n y_var : float\n Variance of index values in the 'y' position weighted by the \n corresponding group data.\n \n covariance : float\n Covariance of the index values weighted by the corresponding\n group data. 
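(These weighted statistics presumably follow the standard weighted-moment form -- e.g., x_bar = sum(w*x)/sum(w) and covariance = sum(w*(x - x_bar)*(y - y_bar))/sum(w), with the group data serving as the weights w; the exact normalization used in the fitting process may differ, so treat these formulas as a sketch.)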
\n \n covariance_matrix : array_like\n Covariance matrix for the multivariate normal that is used in\n the fitting process.\n \n rho : float\n Correlation coefficient computed from the covariance matrix.\n \n eigen_values : array_like\n Eigenvalues obtained from the eigendecomposition of the\n covariance matrix.\n \n eigen_vectors : array_like\n Eigenvectors obtained from the eigendecomposition of the\n covariance matrix.\n \n x_len : float\n Length of the major axis of the ellipse in pixels.\n \n y_len : float\n Length of the minor axis of the ellipse in pixels.\n \n radians : float\n Rotation of ellipse denoted in radians.\n \n degrees : float\n Rotation of ellipse denoted in degrees.\n \"\"\"\n\n def __init__(self):\n self.x_bar = None\n self.y_bar = None\n self.x_var = None\n self.y_var = None\n self.covariance = None\n self.covariance_matrix = None\n self.rho = None\n self.eigen_values = None\n self.eigen_vectors = None\n self.x_len = None\n self.y_len = None\n self.radians = None\n self.degrees = None\n\n class _Metrics(object):\n \"\"\"\n An internal subclass for storing the evaluated metrics associated\n to a detection.\n \n Attributes\n ----------\n standard_deviation : float\n Standard deviation of the output residuals for the group.\n \n variance : float\n Variance of the output residuals for the group.\n \n average : float\n Mean of the output residuals for the group.\n \n weighted_average : float\n Mean of the output residuals weighted by the group data.\n \"\"\"\n \n def __init__(self):\n self.standard_deviation = None\n self.variance = None\n self.average = None\n self.weighted_average = None\n\n def __init__(self, bounds):\n \"\"\"\n Parameters\n ----------\n bounds : list\n List of four integers corresponding to minimum row value, \n maximum row value, minimum column value, and maximum column\n value in this order.\n \"\"\"\n self.image = self._Image(bounds)\n self.res = self._Residuals()\n self.fit = self._Fit()\n self.stats = self._Stats()\n self.metrics = self._Metrics()\n self.flag = False\n\n class Params(object):\n \"\"\"\n An internal class for specifying the ClustarData parameters.\n \n Attributes\n ----------\n radius_factor : float\n Factor multiplied by the radius to determine the cropping circle in the \n denoising process; must be within the range [0, 1].\n \n chunks : int\n Number of chunks to use in a grid; must be an odd number.\n \n quantile : float\n Quantile of RMS to determine the noise level; must be within the \n range [0, 1].\n \n apply_gradient : bool\n Determine if the FITS image should be multiplied by a gradient\n in order to elevate central points; similar to multiplying the\n FITS image by the associated 'pb' data.\n \n sigma : float\n Factor multiplied by the noise level to determine the cutoff point,\n where values less than this threshold are set to zero.\n \n alpha : float\n Determines the size of the ellipse in relation to the chi-squared \n distribution; must be within the range (0, 1).\n \n buffer_size : int\n Number of points considered outside of the group range. 
For instance,\n given a 1-d group range of [10, 20], the algorithm checks\n for nonzero points within the range [5, 25] when the 'buffer_size'\n is 5.\n \n group_size : int\n Minimum number of nonzero points that determines a group.\n \n group_factor : float\n Ratio between [0, 1] that specifies the minimum number of \n nonzero points that determines a group in relation to the number\n of nonzero points in the largest group.\n \n metric : str\n Method used for evaluating the groups; must be one of the \n following: \"standard_deviation\", \"variance\", \"average\", or \n \"weighted_average\".\n \n threshold : float\n Cutoff point that determines which groups are flagged for manual\n review, given the specified metric. \n \n split_binary : bool\n Experimental; determine whether binary subgroups identified \n within a group should be split into individual groups.\n \n subgroup_factor : float\n Experimental; ratio between [0, 1] that specifies the subgroup \n range in terms of the absolute maximum intensity.\n \n evaluate_peaks : bool\n Experimental; determine whether the peaks of the output residuals\n should be taken into consideration in the flagging process.\n \n smoothing : int\n Experimental; size of window used in the moving average smoothing\n process for peak evaluation.\n \n clip : float\n Experimental; determines the percentage of tail values that are \n trimmed for peak evaluation.\n \"\"\"\n\n def __init__(self, args):\n \"\"\"\n Parameters\n ----------\n args : dict\n Dictionary of keyword arguments; see 'Attributes' for keys.\n \n Raises\n ------\n KeyError\n If a specified key in 'args' does not match the label of the \n specified attributes.\n \"\"\"\n self.radius_factor = 1\n self.chunks = 3\n self.quantile = 0.5\n self.apply_gradient = True\n self.sigma = 5\n self.alpha = 0.2\n self.buffer_size = 10\n self.group_size = 50\n self.group_factor = 0\n self.split_binary = False\n self.subgroup_factor = 0.5\n self.metric = \"variance\"\n self.threshold = 0.01\n self.evaluate_peaks = False\n self.smoothing = 5\n self.clip = 0.75\n self._extract(args)\n\n def _extract(self, args):\n for key in args:\n if key not in vars(self).keys():\n raise KeyError(f\"Invalid keyword '{key}' has been \" +\n \"passed into the ClustarData object.\")\n setattr(self, key, args[key])\n\n def __init__(self, path, **kwargs):\n \"\"\"\n Parameters\n ----------\n path : str\n Path to FITS file.\n \n **kwargs : optional\n See '~clustar.core.ClustarData.params' for other possible \n arguments.\n \"\"\"\n self.path = path\n self.params = self.Params(kwargs)\n self.groups = []\n self.flag = False\n self._load_file()\n self._setup()\n\n def _load_file(self):\n file = astropy.io.fits.open(self.path)\n data = file[0].data[0, 0, :, :]\n header = file[0].header\n self.image = self.Image(data, header)\n\n def _setup(self):\n self = denoise.resolve(self)\n self = group.arrange(self)\n self._build()\n if self.params.split_binary:\n self = group.detect(self)\n self._build()\n self._evaluate()\n\n def _build(self):\n self = group.rectify(self)\n self = group.merge(self)\n self = group.refine(self)\n self = group.extract(self)\n self = group.screen(self)\n self = group.calculate(self)\n\n def _evaluate(self):\n self = fit.compute_fit(self)\n self = fit.compute_ellipse(self)\n self = fit.compute_metrics(self)\n self = fit.compute_peaks(self)\n self = fit.validate(self)\n\n def update(self, **kwargs):\n \"\"\"\n Updates 'Params' object with the specified arguments and executes the\n entire pipeline.\n \n Parameters\n 
---------- \n **kwargs : optional\n See '~clustar.core.ClustarData.params' for other possible \n arguments.\n \"\"\"\n self.params._extract(kwargs)\n self._setup()\n\n def reset(self, **kwargs):\n \"\"\"\n Resets 'Params' object to the default values, then updates 'Params' \n object with the specified arguments and executes the entire pipeline.\n \n Parameters\n ---------- \n **kwargs : optional\n See '~clustar.core.ClustarData.params' for other possible \n arguments.\n \"\"\"\n self.params = self.Params(kwargs)\n self._setup()\n\n def identify(self, vmin=None, vmax=None, show=True, dpi=180):\n \"\"\"\n Displays the FITS image and identifies the groups in green, orange, or\n red rectangles, which are defined as:\n 1. Green denotes that the group is not flagged for manual review.\n 2. Orange denotes that the group is not flagged for manual review, but\n the group is smaller than the beam size.\n 3. Red denotes that the group is flagged for manual review.\n Beam size is the white oval shown on the bottom right corner of \n the FITS image.\n \n Parameters\n ----------\n vmin : float, optional\n Lower bound for the shown intensities. \n \n vmax : float, optional\n Upper bound for the shown intensities.\n \n show : bool, optional\n Determines whether the groups should be identified. If false, the\n rectangles identifying the groups are not drawn.\n \n dpi : int, optional\n Dots per inch.\n \"\"\"\n graph.identify_groups(self, vmin, vmax, show, dpi)\n","sub_path":"src/clustar/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":22109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"295887329","text":"import numpy as np \r\nimport matplotlib.pyplot as plt\r\n\r\nN = 1000\r\nxArray = []\r\nyArray = []\r\nlength = []\r\n\r\ndef SawWave():\r\n xArray = np.linspace(-1,1,N)\r\n for i in range(len(xArray)):\r\n if xArray[i] < 0:\r\n yArray.append(xArray[i] + 0.5)\r\n elif xArray[i] >= 0:\r\n yArray.append((xArray[i] - 0.5)) \r\n return xArray,yArray\r\n\r\ndef FourierTrans(yArray):\r\n N = 1000\r\n c = np.zeros(N//2 + 1,complex)\r\n for k in range(N//2 + 1):\r\n for n in range(N):\r\n c[k] += yArray[n] * np.exp(-2j*np.pi*k*n/N)\r\n length.append(k)\r\n c[k] = np.abs(c[k])\r\n return c,length\r\n\r\n\r\nxArray,yArray = SawWave()\r\n\r\nc_k,length = FourierTrans(yArray)\r\n\r\nplt.clf()\r\nplt.subplot(2,1,1)\r\nplt.xlim([0,100])\r\nplt.plot(length,c_k)\r\n\r\nplt.subplot(2,1,2)\r\nplt.plot(xArray,yArray)\r\n\r\n","sub_path":"week08/ex1b.py","file_name":"ex1b.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"319343023","text":"# coding=utf-8\nfrom mxnet import ndarray as nd\nimport numpy as np\nx = np.ones((2, 3))\ny = nd.array(x)\nz = y.asnumpy()\n#print x, y, z\n\n# broadcasting; reshape() changes the shape of the array\na = nd.arange(3).reshape((3, 1))\nb = nd.arange(2).reshape((1, 2))\nprint ('a:', a)\nprint ('b:', b)\nprint ('a+b:', a+b)\n\n\n","sub_path":"demo3.py","file_name":"demo3.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"233801839","text":"from setuptools import setup\nfrom setuptools import find_packages\nimport os\nimport sys\n\nhere = os.path.abspath(os.path.dirname(__file__)) + '/'\n\nmount_script = 'mount.gridfs' if sys.platform != 'darwin' else 'mount_gridfs'\n\nsetup(\n name=\"gridfs_fuse\",\n url='https://github.com/axiros/py_gridfs_fuse',\n 
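    # NOTE: the package metadata below is read at build time from files tracked in the repo (README.md, LICENSE.md, VERSION, requirements.txt)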
description=open(here + 'README.md').readlines()[1].strip('\\n'),\n license=open(here + 'LICENSE.md').readlines()[0].strip('\\n'),\n version=open(here + 'VERSION').read().strip('\\n') or '0.1.1',\n install_requires=open(here + 'requirements.txt').readlines(),\n include_package_data=True,\n package_dir={'gridfs_fuse': 'gridfs_fuse'},\n packages=find_packages('.'),\n entry_points={\n 'console_scripts': [\n 'gridfs_fuse = gridfs_fuse.main:main',\n '%s = gridfs_fuse.main:_mount_fuse_main' %(mount_script),\n ]\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"13130974","text":"import pandas\nimport skbio\nfrom scipy.stats import spearmanr, pearsonr, kendalltau\nfrom itertools import combinations\nimport random\nfrom numpy import array, zeros\n\ndef foundon( df ) :\n s = df.unstack()\n return s[s > 0].to_dict().keys()\n\ndef host_guest_distances( host_dmatrix, \n guest_dmatrix,\n links,\n shuffled=False ) :\n\n assert type( host_dmatrix ) == skbio.stats.distance._base.DistanceMatrix\n assert type( guest_dmatrix ) == skbio.stats.distance._base.DistanceMatrix\n assert type( links ) == pandas.core.frame.DataFrame\n \n assert host_dmatrix.shape[0] > 3\n assert guest_dmatrix.shape[0] > 3\n\n assert set( host_dmatrix.ids ) == set( links.index )\n assert set( guest_dmatrix.ids ) == set( links.columns )\n\n nlinks = ( links.values > 0 ).sum()\n a = zeros( nlinks * ( nlinks - 1 ) / 2 )\n b = zeros( nlinks * ( nlinks - 1 ) / 2 )\n \n if not shuffled :\n F = foundon( links )\n \n assert len(F) > 3\n \n for n,((i,j),(k,l)) in enumerate( combinations( F, 2 ) ) :\n a[n] = host_dmatrix[ j, l ]\n b[n] = guest_dmatrix[ i, k ]\n \n else :\n \n for n in range( nlinks ) :\n i = random.choice( guest_dmatrix.ids )\n j = random.choice( host_dmatrix.ids )\n k = random.choice( guest_dmatrix.ids )\n l = random.choice( host_dmatrix.ids )\n a[n] = host_dmatrix[ j, l ]\n b[n] = guest_dmatrix[ i, k ]\n \n return ( a, b )\n\ndef all_tests( host_dmatrix,\n guest_dmatrix,\n links,\n permutations=100 ) :\n\n a,b = host_guest_distances( host_dmatrix, guest_dmatrix, links )\n\n R = pearsonr( a, b )[0]\n Roh = spearmanr( a, b )[0]\n Tau = kendalltau( a, b )[0]\n\n r = zeros( permutations )\n roh = zeros( permutations )\n tau = zeros( permutations ) \n \n for n in range(permutations) :\n a,b = host_guest_distances( host_dmatrix,\n guest_dmatrix,\n links,\n shuffled=True )\n r[n] = pearsonr( a, b )[0]\n roh[n] = spearmanr( a, b )[0]\n tau[n] = kendalltau( a, b )[0]\n \n p_r = ( ( array(r) >= R ).sum() + 1 ) / float( permutations + 1 )\n p_roh = ( ( array(roh) >= Roh ).sum() + 1 ) / float( permutations + 1 )\n p_tau = ( ( array(tau) >= Tau ).sum() + 1 ) / float( permutations + 1 )\n \n return { 'r' : R,\n 'p_r' : p_r,\n 'r_perm' : r,\n 'roh' : Roh,\n 'p_roh' : p_roh,\n 'roh_perm' : roh,\n 'tau' : Tau,\n 'p_tau' : p_tau,\n 'tau_perm' : tau }\n\ndef hommola( host_dmatrix,\n guest_dmatrix,\n links,\n permutations=100 ) :\n\n a,b = host_guest_distances( host_dmatrix, guest_dmatrix, links )\n pcc, p = pearsonr( a, b )\n \n prm = zeros( permutations )\n for n in range(permutations) :\n a,b = host_guest_distances( host_dmatrix,\n guest_dmatrix,\n links,\n shuffled=True )\n prm[n] = ( pearsonr( a, b )[0] )\n\n p_value = ( ( array(prm) >= pcc ).sum() + 1 ) / float( permutations + 1 )\n \n return pcc, p_value, 
prm\n","sub_path":"shand/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"180229776","text":"def main():\r\n print('do something in module',__name__)\r\n\r\n\r\nif __name__ == '__main__':\r\n print('executed module from the command line')\r\n main()\r\n\r\n\r\ndef merge(left, right):\r\n i = 0\r\n j = 0\r\n temp = []\r\n while i <= len(left) - 1 and j <= len(right) - 1:\r\n if left[i] <= right[j]:\r\n temp.append(left[i])\r\n i += 1\r\n else:\r\n temp.append(right[j])\r\n j += 1\r\n temp += left[i:] + right[j:]\r\n return temp\r\n\r\nprint(merge([1,3,4],[2,3,3])) \r\n\r\n\r\ndef merge_sort(lst):\r\n #\r\n #\r\n #\r\n return lst_sorted","sub_path":"src/moduletest.py","file_name":"moduletest.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"248702230","text":"'''Admin interface for dynamic reports'''\nimport Bcfg2.Logger\nimport Bcfg2.Server.Admin\nimport datetime\nimport os\nimport sys\nimport traceback\nfrom Bcfg2 import settings\n\n# Load django and reports stuff _after_ we know we can load settings\nfrom django.core import management\nfrom Bcfg2.Reporting.utils import *\n\nproject_directory = os.path.dirname(settings.__file__)\nproject_name = os.path.basename(project_directory)\nsys.path.append(os.path.join(project_directory, '..'))\nproject_module = __import__(project_name, '', '', [''])\nsys.path.pop()\n\n# Set DJANGO_SETTINGS_MODULE appropriately.\nos.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name\nfrom django.db import transaction\n\nfrom Bcfg2.Reporting.models import Client, Interaction, \\\n Performance, Bundle, Group, FailureEntry, PathEntry, \\\n PackageEntry, ServiceEntry, ActionEntry\n\n\ndef printStats(fn):\n \"\"\"\n Print db stats.\n\n Decorator for purging. 
Prints database statistics after a run.\n \"\"\"\n def print_stats(self, *data):\n classes = (Client, Interaction, Performance, \\\n FailureEntry, ActionEntry, PathEntry, PackageEntry, \\\n ServiceEntry, Group, Bundle)\n\n starts = {}\n for cls in classes:\n starts[cls] = cls.objects.count()\n\n fn(self, *data)\n\n for cls in classes:\n print(\"%s removed: %s\" % (cls().__class__.__name__,\n starts[cls] - cls.objects.count()))\n\n return print_stats\n\n\nclass Reports(Bcfg2.Server.Admin.Mode):\n \"\"\" Manage dynamic reports \"\"\"\n django_commands = ['dbshell', 'shell', 'sqlall', 'validate']\n __usage__ = (\"[command] [options]\\n\"\n \" Commands:\\n\"\n \" init Initialize the database\\n\"\n \" purge Purge records\\n\"\n \" --client [n] Client to operate on\\n\"\n \" --days [n] Records older than n days\\n\"\n \" --expired Expired clients only\\n\"\n \" scrub Scrub the database for duplicate \"\n \"reasons and orphaned entries\\n\"\n \" stats Print database statistics\\n\"\n \" update Apply any updates to the reporting \"\n \"database\\n\"\n \"\\n\"\n \" Django commands:\\n \" \\\n + \"\\n \".join(django_commands))\n\n def __init__(self, setup):\n Bcfg2.Server.Admin.Mode.__init__(self, setup)\n try:\n import south\n except ImportError:\n print(\"Django south is required for Reporting\")\n raise SystemExit(-3)\n\n def __call__(self, args):\n if len(args) == 0 or args[0] == '-h':\n print(self.__usage__)\n raise SystemExit(0)\n\n # FIXME - dry run\n\n if args[0] in self.django_commands:\n self.django_command_proxy(args[0])\n elif args[0] == 'scrub':\n self.scrub()\n elif args[0] == 'stats':\n self.stats()\n elif args[0] in ['init', 'update', 'syncdb']:\n if self.setup['debug']:\n vrb = 2\n elif self.setup['verbose']:\n vrb = 1\n else:\n vrb = 0\n try:\n management.call_command(\"syncdb\", verbosity=vrb)\n management.call_command(\"migrate\", verbosity=vrb)\n except:\n print(\"Update failed: %s\" %\n traceback.format_exc().splitlines()[-1])\n raise SystemExit(1)\n elif args[0] == 'purge':\n expired = False\n client = None\n maxdate = None\n state = None\n i = 1\n while i < len(args):\n if args[i] == '-c' or args[i] == '--client':\n if client:\n self.errExit(\"Only one client per run\")\n client = args[i + 1]\n print(client)\n i = i + 1\n elif args[i] == '--days':\n if maxdate:\n self.errExit(\"Max date specified multiple times\")\n try:\n maxdate = datetime.datetime.now() - \\\n datetime.timedelta(days=int(args[i + 1]))\n except:\n self.log.error(\"Invalid number of days: %s\" %\n args[i + 1])\n raise SystemExit(-1)\n i = i + 1\n elif args[i] == '--expired':\n expired = True\n i = i + 1\n if expired:\n if state:\n self.log.error(\"--state is not valid with --expired\")\n raise SystemExit(-1)\n self.purge_expired(maxdate)\n else:\n self.purge(client, maxdate, state)\n else:\n print(\"Unknown command: %s\" % args[0])\n\n @transaction.commit_on_success\n def scrub(self):\n ''' Perform a thorough scrub and cleanup of the database '''\n\n # Cleanup unused entries\n for cls in (Group, Bundle, FailureEntry, ActionEntry, PathEntry,\n PackageEntry, ServiceEntry):\n try:\n start_count = cls.objects.count()\n cls.prune_orphans()\n self.log.info(\"Pruned %d %s records\" % \\\n (start_count - cls.objects.count(), cls.__name__))\n except:\n print(\"Failed to prune %s: %s\" %\n (cls.__name__,\n traceback.format_exc().splitlines()[-1]))\n\n def django_command_proxy(self, command):\n '''Call a django command'''\n if command == 'sqlall':\n management.call_command(command, 'Reporting')\n 
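        # note: unlike the other proxied commands, Django's 'sqlall' expects an app label, hence the extra 'Reporting' argument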
else:\n management.call_command(command)\n\n @printStats\n def purge(self, client=None, maxdate=None, state=None):\n '''Purge historical data from the database'''\n\n filtered = False # indicates whether or not a client should be deleted\n\n if not client and not maxdate and not state:\n self.errExit(\"Reports.prune: Refusing to prune all data\")\n\n ipurge = Interaction.objects\n if client:\n try:\n cobj = Client.objects.get(name=client)\n ipurge = ipurge.filter(client=cobj)\n except Client.DoesNotExist:\n self.log.error(\"Client %s not in database\" % client)\n raise SystemExit(-1)\n self.log.debug(\"Filtering by client: %s\" % client)\n\n if maxdate:\n filtered = True\n if not isinstance(maxdate, datetime.datetime):\n raise TypeError(\"maxdate is not a DateTime object\")\n self.log.debug(\"Filtering by maxdate: %s\" % maxdate)\n ipurge = ipurge.filter(timestamp__lt=maxdate)\n\n if settings.DATABASES['default']['ENGINE'] == \\\n 'django.db.backends.sqlite3':\n grp_limit = 100\n else:\n grp_limit = 1000\n if state:\n filtered = True\n if state not in ('dirty', 'clean', 'modified'):\n raise TypeError(\"state is not one of the following values: \"\n \"dirty, clean, modified\")\n self.log.debug(\"Filtering by state: %s\" % state)\n ipurge = ipurge.filter(state=state)\n\n count = ipurge.count()\n rnum = 0\n try:\n while rnum < count:\n grp = list(ipurge[:grp_limit].values(\"id\"))\n # just in case...\n if not grp:\n break\n Interaction.objects.filter(id__in=[x['id']\n for x in grp]).delete()\n rnum += len(grp)\n self.log.debug(\"Deleted %s of %s\" % (rnum, count))\n except:\n self.log.error(\"Failed to remove interactions\")\n (a, b, c) = sys.exc_info()\n msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]\n del a, b, c\n self.log.error(msg)\n\n # Prune any orphaned ManyToMany relations\n for m2m in (ActionEntry, PackageEntry, PathEntry, ServiceEntry, \\\n FailureEntry, Group, Bundle):\n self.log.debug(\"Pruning any orphaned %s objects\" % \\\n m2m().__class__.__name__)\n m2m.prune_orphans()\n\n if client and not filtered:\n # Delete the client, ping data is automatic\n try:\n self.log.debug(\"Purging client %s\" % client)\n cobj.delete()\n except:\n self.log.error(\"Failed to delete client %s\" % client)\n (a, b, c) = sys.exc_info()\n msg = traceback.format_exception(a, b, c, limit=2)[-1][:-1]\n del a, b, c\n self.log.error(msg)\n\n @printStats\n def purge_expired(self, maxdate=None):\n '''Purge expired clients from the database'''\n\n if maxdate:\n if not isinstance(maxdate, datetime.datetime):\n raise TypeError(\"maxdate is not a DateTime object\")\n self.log.debug(\"Filtering by maxdate: %s\" % maxdate)\n clients = Client.objects.filter(expiration__lt=maxdate)\n else:\n clients = Client.objects.filter(expiration__isnull=False)\n\n for client in clients:\n self.log.debug(\"Purging client %s\" % client)\n Interaction.objects.filter(client=client).delete()\n client.delete()\n\n def stats(self):\n classes = (Client, Interaction, Performance, \\\n FailureEntry, ActionEntry, PathEntry, PackageEntry, \\\n ServiceEntry, Group, Bundle)\n\n for cls in classes:\n print(\"%s has %s records\" % (cls().__class__.__name__,\n cls.objects.count()))\n","sub_path":"src/lib/Bcfg2/Server/Admin/Reports.py","file_name":"Reports.py","file_ext":"py","file_size_in_byte":9990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"38718729","text":"from django.views.generic import ListView\nfrom django.views.generic import DetailView\nfrom results.models 
import *\nfrom django.core.urlresolvers import reverse\nfrom time import *\nfrom django.utils import timezone\nfrom django.views.generic.edit import CreateView, DeleteView, UpdateView\n\n\nclass listviewa(ListView):\n model = instagram_data\n paginate_by = 10\n # model = instagram_data.objects.filter()\n template_name = 'listview.html'\n\n # def get_queryset(self, **kwargs):\n #\n # return instagram_data.objects.filter(instagram_name = 'hello')\n\n\n # def get_success_url(self):\n # return reverse('contacts-list')\n\n\n# http://localhost:8000/detailed/43/\nclass detailview(DetailView):\n\n model = instagram_data\n template_name = 'detailview.html'\n slug_field = 'instagram_name'\n def get_context_data(self, **kwargs):\n context = super(detailview, self).get_context_data(**kwargs)\n context['now'] = timezone.now()\n return context\n\nclass createviewa(CreateView):\n\n model = instagram_data\n template_name = 'createview.html'\n fields = ['instagram_name']\n\n def get_success_url(self):\n return reverse('instagramlist')\n\nclass deleteviewa(DeleteView):\n model = instagram_data\n template_name = 'deleteview.html'\n fields = ['instagram_name']\n\n def get_success_url(self):\n return reverse('instagramlist')\n\n\n\n\nclass updateviewa(UpdateView):\n fields = ['instagram_name']\n model = instagram_data\n template_name = 'edit.html'\n\n def get_success_url(self):\n return reverse('instagramlist')\n","sub_path":"instagram/generic_views.py","file_name":"generic_views.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"636261510","text":"\"\"\"Gmod loading screen stuff\n\nRevision ID: 30033e4e08ef\nRevises: 51abb7077d9e\nCreate Date: 2015-08-06 22:07:45.554000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '30033e4e08ef'\ndown_revision = '51abb7077d9e'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n\ndef upgrade():\n op.create_table('gmod_loadscreen_gamemode',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('rules', postgresql.ARRAY(sa.Text()), nullable=False),\n sa.Column('extrainfo', postgresql.ARRAY(sa.Text()), nullable=True),\n sa.Column('title', sa.Text(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('gmod_loadscreen_background',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('map', sa.Text(), nullable=False),\n sa.Column('gamemode', sa.Text(), nullable=True),\n sa.Column('url', sa.Text(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n\n\ndef downgrade():\n op.drop_table('gmod_loadscreen_background')\n op.drop_table('gmod_loadscreen_gamemode')\n","sub_path":"temporals_web/migration/versions/201508062207_30033e4e08ef_gmod_loading_screen_stuff.py","file_name":"201508062207_30033e4e08ef_gmod_loading_screen_stuff.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"122721656","text":"\"\"\"Example script to get the apks feed and show the download links.\n\nRemember to set the Koodous API key in the ``Koodous`` instance or use the environment variable `KOODOUS_API_TOKEN`.\n\"\"\"\nimport os\n\nfrom koodous.api import Koodous\n\nif __name__ == '__main__':\n token = os.getenv('KOODOUS_API_TOKEN')\n koodous = Koodous(token)\n # Get the apks feed.\n apks_feed = koodous.feed_apks()\n # Write the apks feed zip.\n apks_feed.write('/tmp/feed.zip')\n samples = apks_feed.samples()\n # 
Print the samples sha256s.\n print([sample.sha256 for sample in samples])\n # Download the samples apks.\n apks_feed.download_samples(samples, '/tmp/samples_apks')\n\n","sub_path":"examples/apks_feed.py","file_name":"apks_feed.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"275052603","text":"import cv2\nfrom matplotlib import pyplot as plt\nimport os\nimport numpy as np\nimport time\nimport imutils\n\next = 'jpg'\ndirs = ['/green/', '/red/', '/yellow/']\npath = '../../../data/traffic-light'\n\n\ndef test_hist(filename):\n rgb = cv2.imread(filename)\n height, width, depth = rgb.shape\n newWidth = 200\n newHeight = (newWidth * height) // width\n\n rgb = imutils.resize(rgb, width=newWidth)\n hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)\n\n # define the range and mask for green in HSV\n lower_g = np.array([36, 50, 100])\n upper_g = np.array([95, 255, 255])\n mask_g = cv2.inRange(hsv, lower_g, upper_g)\n\n # define the range and mask for yellow in HSV\n lower_y = np.array([15, 100, 100])\n upper_y = np.array([35, 255, 255])\n mask_y = cv2.inRange(hsv, lower_y, upper_y)\n\n # define the range and mask for red in HSV (red sits at both ends of the hue range, so there are two red masks)\n lower_r0 = np.array([0, 150, 150])\n upper_r0 = np.array([14, 255, 255])\n mask_r0 = cv2.inRange(hsv, lower_r0, upper_r0)\n\n lower_r1 = np.array([160, 150, 150])\n upper_r1 = np.array([180, 255, 255])\n mask_r1 = cv2.inRange(hsv, lower_r1, upper_r1)\n\n mask = cv2.bitwise_or(mask_r0, mask_r1)\n mask = cv2.bitwise_or(mask, mask_y)\n mask = cv2.bitwise_or(mask, mask_g)\n\n # bitwise-AND of the mask and the source image\n res = cv2.bitwise_and(rgb, rgb, mask=mask)\n gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n bilateral_filtered_gray = cv2.bilateralFilter(gray, 5, 175, 175)\n\n circles = cv2.HoughCircles(bilateral_filtered_gray, cv2.HOUGH_GRADIENT, 1, newHeight, param1=150, param2=15,\n minRadius=0, maxRadius=newHeight // 3)\n\n if circles is not None:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n # draw the outer circle\n cv2.circle(res, (i[0], i[1]), i[2], (255, 255, 255), 2)\n\n hist = cv2.calcHist([hsv], [0], mask, [180], [0, 180])\n plt.plot(hist, color='gray')\n plt.xlim([0, 180])\n plt.ylim([0, 2000])\n plt.show()\n return hist,rgb, mask,bilateral_filtered_gray, res\n\n\ndef getMainColor(hist: np.ndarray) -> tuple:\n point = np.argmax(hist)\n color = None\n\n if point >= 0 and point <= 14 or point > 160:\n color = 'red'\n elif point >= 15 and point < 35:\n color = 'yellow'\n elif point >= 36 and point <= 95:\n color = 'green'\n\n return color, point\n\nsp = []\nfor d in dirs:\n for f in os.listdir(path + d):\n if f.endswith(ext):\n sp.append(path+d+f)\ni = int(input(\"Please start counting from zero: \"))\nhist,rgb,mask,bilateral_filtered_gray,res = test_hist(sp[i])\nprint(getMainColor(hist))\n\ncv2.imshow('img', rgb)\ncv2.imshow('mask', mask)\ncv2.imshow('gray', bilateral_filtered_gray)\ncv2.imshow('res', res)\n\nwhile 1:\n if cv2.waitKey(1) == 27:\n break\n\n\ncv2.destroyAllWindows()","sub_path":"sources/part1/Artur/Arturs_Circle_check(secondtask).py","file_name":"Arturs_Circle_check(secondtask).py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466139919","text":"# This file is part of MLDB. Copyright 2015 Datacratic. 
All rights reserved.\n\n# MLDB-1025-dataset-output-with-default.py\n# Guy Dumais, 4 November 2015\n# Copyright (c) 2015 Datacratic Inc. All rights reserved.\n\nimport json\nimport datetime\nimport requests\nimport random\n\ndataset_index = 1\n\ndef run_transform(output):\n global dataset_index\n dataset_index += 1 \n result = mldb.perform(\"PUT\", \"/v1/procedures/transform_procedure\", [], \n {\n \"type\": \"transform\",\n \"params\": {\n \"inputDataset\": { \"id\": \"dataset1\" },\n \"outputDataset\": output,\n \"runOnCreation\" : True,\n \"where\": \"rowName() = '2'\"\n }\n })\n\n mldb.log(result)\n assert result['statusCode'] == 201, \"failed to create the procedure with output: %s\" % output\n\n id = output['id'] if 'id' in output else output\n\n result = mldb.perform('GET', '/v1/query', [['q', \"SELECT * FROM \" + id]])\n mldb.log(result)\n assert result['statusCode'] == 200, \"failed to get the transformed dataset\"\n rows = json.loads(result[\"response\"])\n mldb.log(rows)\n return rows\n\n \ndef load_test_dataset():\n ds1 = mldb.create_dataset({\n 'type': 'sparse.mutable',\n 'id': 'dataset1'})\n\n row_count = 10\n for i in xrange(row_count - 1):\n # row name is x's value\n ds1.record_row(str(i), [['x', i, now], ['y', i, now]])\n \n ds1.commit()\n\ndef train_svd_with_default():\n svd_procedure = \"/v1/procedures/svd\"\n # svd procedure configuration\n svd_config = {\n 'type' : 'svd.train',\n 'params' :\n\t{\n \"trainingData\": \"select * from dataset1\", \n \"rowOutputDataset\": \"svd_row\", # first way to specify output dataset using default\n \"columnOutputDataset\" : { # second way to specify an output dataset using default\n \"id\": \"svd_column\"\n }\n\t}\n }\n \n result = mldb.perform('PUT', svd_procedure, [], svd_config)\n response = json.loads(result['response'])\n msg = \"Could not create the svd procedure - got status {}\\n{}\"\n mldb.log(response)\n msg = msg.format(result['statusCode'], response)\n assert result['statusCode'] == 201, msg\n\n result = mldb.perform('POST', svd_procedure + '/runs')\n response = json.loads(result['response'])\n msg = \"Could not train the svd procedure - got status {}\\n{}\"\n mldb.log(response)\n msg = msg.format(result['statusCode'], response)\n assert 300 > result['statusCode'] >= 200, msg\n\n result = mldb.perform('GET', '/v1/datasets/svd_column', [])\n response = json.loads(result['response'])\n #mldb.log(response)\n msg = \"Could not get the svd embedding output - got status {}\\n{}\"\n msg = msg.format(result['statusCode'], response)\n assert result['statusCode'] == 200, msg\n assert response['type'] == 'embedding', 'expected an embedding output dataset'\n\n result = mldb.perform('GET', '/v1/datasets/svd_row', [])\n response = json.loads(result['response'])\n #mldb.log(response)\n msg = \"Could not get the svd embedding output - got status {}\\n{}\"\n msg = msg.format(result['statusCode'], response)\n assert result['statusCode'] == 200, msg\n assert response['type'] == 'embedding', 'expected an embedding output dataset'\n\ndef train_kmeans_with_default(config):\n kmeans_procedure = \"/v1/procedures/kmeans\"\n # kmeans procedure configuration\n \n result = mldb.perform('PUT', kmeans_procedure, [], config)\n response = json.loads(result['response'])\n msg = \"Could not create the kmeans procedure - got status {}\\n{}\"\n mldb.log(response)\n msg = msg.format(result['statusCode'], response)\n assert result['statusCode'] == 201, msg\n\n result = mldb.perform('POST', kmeans_procedure + '/runs')\n response = 
json.loads(result['response'])\n msg = \"Could not train the kmeans procedure - got status {}\\n{}\"\n mldb.log(response)\n msg = msg.format(result['statusCode'], response)\n assert 300 > result['statusCode'] >= 200, msg\n\n centroids_id = config['params']['centroidsDataset']['id']\n result = mldb.perform('GET', '/v1/datasets/' + centroids_id, [])\n response = json.loads(result['response'])\n mldb.log(response)\n msg = \"Could not get the kmeans centroids [{}]- got status {}\\n{}\"\n msg = msg.format(centroids_id, result['statusCode'], response)\n assert result['statusCode'] == 200, msg\n return response['type']\n\nnow = datetime.datetime.now()\nsame_time_tomorrow = now + datetime.timedelta(days=1)\nin_two_hours = now + datetime.timedelta(hours=2)\n\nload_test_dataset()\n\n# check that the transformed dataset is as expected\nassert len( run_transform({ \"id\": \"dataset2\", \"type\": \"sparse.mutable\" })) == 1, 'expected only one row to be returned'\n# check that default type works\nassert len( run_transform({ \"id\": \"dataset3\" })) == 1, 'expected only one row to be returned'\n# check that string are interpreted as dataset id\nassert len( run_transform(\"dataset4\") ) == 1, 'expected only one row to be returned'\n# check that the transformed dataset can be overwritten\nassert len( run_transform({ \"id\": \"dataset2\", \"type\": \"sparse.mutable\" })) == 1, 'expected only one row to be returned'\n \ntrain_svd_with_default();\n\nmetric = 'euclidean'\nkmeans_config = {\n 'type' : 'kmeans.train',\n 'params' : {\n 'trainingData' : 'select * from dataset1',\n 'centroidsDataset' : {'id' : 'kmeans_centroids', \n 'params': {'metric': metric}},\n 'numClusters' : 2,\n 'metric': metric\n }\n}\n\n# check that the default type is used\nresult = mldb.perform('GET', '/v1/datasets', []);\ndataset_count_before = len(json.loads(result['response']))\nassert train_kmeans_with_default(kmeans_config) == 'embedding', 'expected an embedding output dataset'\nresult = mldb.perform('GET', '/v1/datasets', []);\ndataset_count_after = len(json.loads(result['response']))\nassert dataset_count_before + 1 == dataset_count_after, 'only the centroids must have been created'\n\nkmeans_config = {\n 'type' : 'kmeans.train',\n 'params' : {\n 'trainingData' : 'select * from dataset1',\n 'centroidsDataset' : {'id' : 'kmeans_centroids_2',\n 'type' : 'sparse.mutable'},\n 'outputDataset': { 'type' : 'embedding'},\n 'numClusters' : 2,\n 'metric': metric\n }\n}\n\n# check that the type can be changed and that id are auto-generated when not specified\nresult = mldb.perform('GET', '/v1/datasets', []);\ndataset_count_before = len(json.loads(result['response']))\nassert train_kmeans_with_default(kmeans_config) == 'sparse.mutable', 'expected an sparse.mutable output dataset'\nresult = mldb.perform('GET', '/v1/datasets', []);\ndataset_count_after = len(json.loads(result['response']))\nassert dataset_count_before + 2 == dataset_count_after, 'expect the centroids and the outputDataset to be created'\n\nmldb.script.set_return('success')\n","sub_path":"testing/MLDB-1025-dataset-output-with-default.py","file_name":"MLDB-1025-dataset-output-with-default.py","file_ext":"py","file_size_in_byte":7083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"460211193","text":"from os import path\n\nfrom cork.mongodb_backend import MongoDBBackend\n\nDATABASE = 'users'\n\nFIXTURES_DIR = path.join(path.dirname(path.realpath(__file__)), 'fixtures')\nFIXTURE_USERS_FILE = 'users.json'\nFIXTURE_ROLES_FILE = 
'roles.json'\n\ndb = MongoDBBackend(db_name=DATABASE, initialize=True)\n\n\ndef populate_database():\n \"\"\"\n Provides initial data for users and roles.\n :return: None\n :rtype: NoneType\n \"\"\"\n import json\n\n db.users._coll.insert(json.load(open(path.join(FIXTURES_DIR, FIXTURE_USERS_FILE))))\n db.roles._coll.insert(json.load(open(path.join(FIXTURES_DIR, FIXTURE_ROLES_FILE))))","sub_path":"pypiserver/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"372068747","text":"import base64\nimport logging\nimport os\nimport unittest\nimport warnings\n\nimport boto3\nimport yaml\n\nfrom src.utils.env import get_env, get_secret_env, dict_to_env\n\n# Config\nconfig = yaml.load(open('config/dev.yml', 'r'), Loader=yaml.SafeLoader)\nserverless = yaml.load(open('serverless.yml', 'r'), Loader=yaml.SafeLoader)\n\n# Environments\ndict_to_env(config)\nos.environ['JOB_QUEUE'] = 'dev-%s-queue' % serverless['service']['name']\nos.environ['PAGE_MANAGEMENT_TABLE'] = 'dev-%s-db' % serverless['service']['name']\n\n# Logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(logging.Formatter('[%(levelname)s-%(module)s-%(funcName)s] %(message)s'))\nstream_handler.setLevel(logging.INFO)\nlogger.addHandler(stream_handler)\n\n\nclass TestEnv(unittest.TestCase):\n def setUp(self):\n # boto3 issue https://github.com/boto/boto3/issues/454#issuecomment-380900404\n warnings.filterwarnings(\"ignore\", category=ResourceWarning, message=\"unclosed.*\")\n\n def test_get_exists_env(self):\n \"\"\"\n Get an environment variable that exists.\n \"\"\"\n os.environ['GIT2WIKI_EXISTS_ENV'] = 'sample_env'\n sample_env = get_env('GIT2WIKI_EXISTS_ENV')\n\n self.assertEqual(sample_env, 'sample_env')\n\n def test_get_not_exists_env(self):\n \"\"\"\n Get an environment variable that does not exist.\n \"\"\"\n sample_env = get_env('GIT2WIKI_NOT_EXISTS_ENV')\n sample_secret_env = get_secret_env('GIT2WIKI_NOT_EXISTS_ENV')\n\n self.assertIsNone(sample_env)\n self.assertIsNone(sample_secret_env)\n\n def test_get_plain_text_with_get_secret_env(self):\n \"\"\"\n Get an unencrypted environment variable with get_secret_env().\n \"\"\"\n sample_env = get_secret_env('BACKLOG_API_KEY')\n\n self.assertIsNotNone(sample_env)\n\n def test_get_encrypted_text_with_get_secret_env(self):\n \"\"\"\n Get an encrypted environment variable with get_secret_env().\n \"\"\"\n kms = boto3.client('kms')\n plain_text_env = 'test'\n encrypted = kms.encrypt(\n KeyId=os.environ['AWS_KMS_ARN'].split('/')[-1],\n Plaintext=plain_text_env.encode(),\n )\n encrypted_env = base64.urlsafe_b64encode(encrypted['CiphertextBlob']).decode()\n os.environ['GIT2WIKI_ENCRYPTED_ENV'] = encrypted_env\n\n self.assertNotEqual(plain_text_env, encrypted_env)\n self.assertEqual(encrypted_env, get_secret_env('GIT2WIKI_ENCRYPTED_ENV'))\n\n def tearDown(self):\n pass\n","sub_path":"tests/test_env.py","file_name":"test_env.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"87710418","text":"'''\nRearrange Array Alternately\nGiven a sorted array of positive integers. 
Your task is to rearrange the array elements alternately, i.e. the first element should be the max value, the second should be the min value, the third should be the second max, the fourth should be the second min, and so on.\n\nExample 1:\n\nInput:\nN = 6\narr[] = {1,2,3,4,5,6}\nOutput: 6 1 5 2 4 3\nExplanation: Max element = 6, min = 1, \nsecond max = 5, second min = 2, and \nso on... Modified array is : 6 1 5 2 4 3.\nExample 2:\n\nInput:\nN = 11\narr[]={10,20,30,40,50,60,70,80,90,100,110}\nOutput:110 10 100 20 90 30 80 40 70 50 60\nExplanation: Max element = 110, min = 10, \nsecond max = 100, second min = 20, and \nso on... Modified array is : \n110 10 100 20 90 30 80 40 70 50 60.\nYour Task:\nThe task is to complete the function rearrange() which rearranges elements as explained above. Printing of the modified array will be handled by driver code.\n\nExpected Time Complexity: O(N).\nExpected Auxiliary Space: O(1).\n'''\n\ndef rearrangeAlt(arr):\n if len(arr) <= 1 :\n return arr\n index = 0\n for i in range((len(arr)+1)//2):\n last = arr.pop()\n arr.insert(index,last)\n index += 2\n return arr\n\nprint (\"\\n Rearrange Array Alternately : \",rearrangeAlt([10,20,30,40,50,60,70,80,90,100,110])) \n","sub_path":"Easy/rearrangeAlt.py","file_name":"rearrangeAlt.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"125770915","text":"# -*- coding:utf-8 -*-\n\"\"\"\nRemove Invalid Parentheses\nRemove the minimum number of invalid parentheses in order to make the input string valid. \nReturn all possible results.\n\nNote: The input string may contain letters other than the parentheses ( and ).\n\nExamples:\n\"()())()\" -> [\"()()()\", \"(())()\"]\n\"(a)())()\" -> [\"(a)()()\", \"(a())()\"]\n\")(\" -> [\"\"]\n\"\"\"\n\ndef remove_invalid_parentheses(s):\n dct = {'(': 1, ')': -1}\n count = 0\n str = ''\n for c in s:\n if c in dct:\n count += dct[c]\n else:\n count += 0\n\n if count >=0:\n str += c\n else:\n count = 0\n return str\n\nprint(remove_invalid_parentheses('(a)()()'))\n\nclass Solution(object):\n \"\"\"\n The idea is to remove each character from the string\n and check if it has valid parentheses\n \"\"\"\n def remove_invalid_parentheses(self, s):\n dct = {'(': 1, ')': -1}\n count = 0\n str = ''\n for c in s:\n if c in dct:\n count += dct[c]\n else:\n count += 0\n\n if count >= 0:\n str += c\n else:\n count = 0\n\n return str\n\n def removeInvalidParentheses(self, s): \n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\" \n import collections\n dct = {'(': 1, ')': - 1}\n cstr = self.remove_invalid_parentheses(s)\n def isValid(s): \n a = 0 \n for c in s:\n a += dct.get(c, 0)\n if a < 0:\n return False \n return a == 0\n\n visited = set([s])\n ans = []\n queue = collections.deque([s])\n DONE = False \n while queue:\n t = queue.popleft()\n if isValid(t):\n DONE = True\n ans.append(t)\n \n if DONE:\n continue\n\n \"\"\"\n If you use cstr then we can get rid of the DONE check this way\n \"\"\"\n # if len(t) == len(cstr):\n # if isValid(t):\n # ans.append(t)\n # continue \n\n for x in range(len(t)):\n if t[x] not in dct:\n continue\n \n # remove each character and check\n\n ns = t[:x] + t[x + 1:]\n if ns not in visited:\n visited.add(ns)\n queue.append(ns)\n return ans\n\ns = Solution()\n\nprint(s.removeInvalidParentheses(\"(a)())()\"))\n\nclass SolutionPermuteWontWork:\n \"\"\"\n This solution won't work.\n The issue with permuting strings is when the character comes in between parentheses\n This means for (a)()()\n a can be 
outside the parentheses\n \"\"\"\n def permute(self, s, lst, step = 0):\n dct = {'(': 1, ')': -1}\n\n if len(s) == step:\n lst.append(''.join(s))\n\n for i in range(step, len(s)):\n arr = [c for c in s]\n arr[i], arr[step] = arr[step], arr[i]\n self.permute(arr, lst, step + 1)\n\n def is_valid(self, s):\n dct = {'(': 1, ')': -1}\n count = 0\n for c in s:\n if c in dct:\n count += dct[c]\n else:\n count += 0\n\n if count < 0:\n return False\n return count == 0\n\n def remove_wrong_parentheses(self, s):\n dct = {'(': 1, ')': -1}\n count = 0\n str = ''\n for c in s:\n if c in dct:\n count += dct[c]\n else:\n count += 0\n\n if count >= 0:\n str += c\n else:\n count = 0\n\n return str\n\n def removeInvalidParentheses(self, s): \n lst = []\n res = []\n cstr = self.remove_wrong_parentheses(s)\n self.permute(cstr, lst)\n for item in lst:\n if self.is_valid(item):\n res.append(item)\n return res\n\n# s = SolutionPermuteWontWork()\n# print(s.removeInvalidParentheses(\"(a)())()\"))","sub_path":"strings/301_remove_invalid_parentheses.py","file_name":"301_remove_invalid_parentheses.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"269294117","text":"#!/usr/bin/env python2\n\nfrom __future__ import print_function\nimport nltk\nimport sys\n\nassert len(sys.argv) == 2\n\nwith open(sys.argv[1]) as f:\n wordCounter = {}\n for sentence in map(nltk.tokenize.word_tokenize,\n nltk.tokenize.sent_tokenize(f.read())):\n for word in sentence:\n if word in wordCounter:\n wordCounter[word] += 1\n else:\n wordCounter[word] = 1\n\nfor word, count in wordCounter.items():\n print(word, ':', count)\n","sub_path":"preprocess/freq_nltk.py","file_name":"freq_nltk.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"176828707","text":"# binary search\ndef binarySearch(arr, l, r, key):\n if r >= l:\n m = l + (r-l)//2\n # print(m)\n if arr[m] == key:\n return m\n elif arr[m] > key:\n return binarySearch(arr, l, m-1, key)\n else:\n return binarySearch(arr, m+1, r, key)\n else:\n print(f\"{key} not found in {arr}\")\n\n\nt = int(input())\nwhile t:\n n, k = map(int, input().split())\n orginal = list(map(int, input().split()))\n\n # sorted_seq holds a sorted copy of the original sequence\n sorted_seq = orginal.copy()\n sorted_seq.sort()\n\n\n printed_ans = False # just a flag to represent if I found the answer\n\n for i in range(n):\n seqence_of_index = [] # stores the indexes of this same number in the sorted sequence\n\n # if, after sorting, the number is not already where it should be...\n if orginal[i] != sorted_seq[i]:\n\n # j will have the index of the i-th element from the original array\n j = binarySearch(sorted_seq, 0, n-1, orginal[i])\n seqence_of_index.append(j)\n\n # there is a possibility of repetition, hence I will move forward to see if the number is repeated again\n while j+1 < n:\n if sorted_seq[j+1] == orginal[i]:\n seqence_of_index.append(j+1)\n j+=1\n else:\n break\n # and also move backward to see if the number is repeated again in the sorted list\n j = seqence_of_index[0]\n while j-1 > -1:\n if sorted_seq[j-1] == orginal[i]:\n seqence_of_index.append(j-1)\n j-=1\n else:\n break\n\n divide_huya = False # another flag to represent if a swap of original[i] is possible or not\n for j in seqence_of_index:\n if abs(j-i) >= k and abs(j-i)//k == abs(j-i)/k:\n divide_huya = True\n \n # if no offset is divisible by k then, no matter how hard we try, we can never move this number to the right place with jumps of size k
\n if not divide_huya:\n print(\"no\")\n printed_ans = True\n break\n\n if not printed_ans:\n print(\"yes\")\n\n t-=1\n\n\"\"\"\n2\n4 1\n1 4 2 3\n4 2\n1 4 2 3\n\n\nyes\nno\n\n\"\"\"","sub_path":"LTIME83B/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"88453981","text":"import serial\nimport time\nimport os \nimport sys\n\ndef shell(ser):\n while True:\n cmd = input() + '\\n'\n ser.write(cmd.encode())\n if cmd == 'loadimg\\n':\n loadimg(ser)\n else:\n data = ser.read_until(terminator = b'# ').decode()\n print('Pi:', data, end = '')\n\ndef loadimg(ser):\n print('wait pi response')\n print('Pi:', ser.read_until(terminator=b'PleaseLoadimg').decode())\n \n # read file size\n with open(sys.argv[1], 'rb') as f:\n data = f.read()\n size = len(data)\n \n # send kernel size to RPi\n line = str(size) + '\\n'\n ser.write(line.encode())\n print('wait pi response')\n print('Pi:', ser.read_until(terminator = b'RecvSizeDone').decode())\n \n #read file\n data = list()\n i = 0\n with open(sys.argv[1], 'rb') as f:\n while True:\n b = f.read(1)\n if not b:\n break\n else:\n i += 1\n data.append(b) \n if i%128 == 0:\n ser.write(b''.join(data))\n print('wait pi response')\n print('Pi:', ser.read_until(terminator = b'RecvChunckImgDone').decode())\n data.clear()\n #send file\n if data:\n ser.write(b''.join(data))\n \n print('wait pi response')\n print('Pi:', ser.read_until(terminator = b'RecvImgDone').decode())\n \n\nser = serial.Serial(\n port = sys.argv[2],\\\n baudrate=115200\n )\n\ntry:\n shell(ser)\n \nexcept PermissionError as e:\n print(\"Check your permission!\");\n","sub_path":"lab2/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"135544655","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n\ndef MuAndLamb(mess,B,B_err):\n #First we need to read in the txt data and make it 3 columns (to sig 1 pi)\n unsorted = np.loadtxt(\"multifit_\"+mess+\".txt\", skiprows=1, unpack=True)\n\n sig1=unsorted[0][0:31:3]\n pi=unsorted[0][1:32:3]\n sig2=unsorted[0][2:33:3]\n\n\n sig1_err=unsorted[1][0:31:3]\n pi_err=unsorted[1][1:32:3]\n sig2_err=unsorted[1][2:33:3]\n \n if mess == \"14\": #because the first fit didn't converge\n sig1=sig1[1:]\n pi=pi[1:]\n sig2=sig2[1:]\n sig1_err=sig1_err[1:]\n pi_err=pi_err[1:]\n sig2_err=sig2_err[1:]\n #INTERESTING OBSERVATION: the last two values of unsorted[0] seem flipped, as sig2 < pi (multifit_10.txt)\n\n\n\n #now we can plot the pi data and fit a function\n # make sure the function goes through zero...\n k=np.arange(0,11)\n if mess ==\"14\": #because the first fit didn't converge\n k=np.arange(1,11)\n\n plt.scatter(pi,k,label=\"Pi lines\")\n def fitfunction(x,a,b,c,d,e): #4th order polynomial TODO experiment with different orders\n return a*x**4+b*x**3+c*x**2+d*x+e\n\n popt, pcov = curve_fit(fitfunction,pi,k)\n plt.plot(pi,fitfunction(pi,*popt),label=\"Fit\")\n plt.xlabel(\"Distance a in pixels\")\n plt.ylabel(\"Order k\")\n plt.title(\"Pi lines with 4th-order fit, I=\"+ mess +\"A \")\n plt.legend()\n plt.ylim(0,11)\n plt.xlim(500,1750)\n plt.savefig(\"pi_\"+mess+\".jpg\")\n plt.close() \n\n #next we need the del_k values, for which we input the sigma values into our function\n #and subtract the order to get the deviation 
from the set order\n del_k1=k-fitfunction(sig1,*popt) #=-(fit-k) because sig1 smaller than pi\n del_k2=fitfunction(sig2,*popt)-k\n del_k=np.concatenate((del_k1,del_k2))\n\n #we calculate the mean and std to get a value we can use for del_lamb\n del_k_mean = np.mean(del_k)\n del_k_std = np.std(del_k)\n print(\"Measurement at \"+mess+\" amperes:\")\n print(\"Delta K: \",del_k_mean)\n print(\"Standard deviation: \",del_k_std)\n\n #using del_a/DEL_a = del_k and (7) we can now calculate del_lamb\n n=1.4567\n d=4.04*10**-3 #[m] \n lamb= 643.8023930075673*10**-9 #[m]\n lamb_err = 0.00249506113782 *10**-9 #[m]\n del_lamb=del_k_mean*lamb**2/(2*d*np.sqrt(n**2-1)) #Equation (7)\n del_lamb_err = np.sqrt((del_lamb/del_k_mean*del_k_std)**2+(2*del_lamb/lamb*lamb_err)**2) #both d and n have no errors given\n \n\n # next we calculate the magnetic moment of cadmium using the energy split detailed in (8) (Theory part) \n # The gap energy corresponds to a change in lambda which we already calculated and can therefore use\n h= 6.626 *10**-34 #Planck constant [Js]\n c=299792458 #speed of light [m/s]\n E=h*c/(lamb+del_lamb)-h*c/(lamb) #CAREFUL: NEW FORMULA FOR THE ZEEMAN SPLIT ENERGY!\n E_err=h*c*np.sqrt((del_lamb_err/(lamb+del_lamb)**2)**2+(lamb_err/(lamb+del_lamb)**2-lamb_err/lamb**2)**2)\n\n\n mu_B= E/B #Magnetic moment\n mu_B_err= np.sqrt((E_err/B)**2+(E/B**2*B_err)**2)\n\n print(\"Magnetic moment: \",mu_B)\n print(\"Error: \",mu_B_err)\n print(\"---------------------------------------\")\n return E #the only value needed in the next part\n\n\n\n\n#we now calculate mu_B differently, using a plot of E vs B \n#the slope of the resulting line should equal mu_B\nB_list=np.array([0.526,0.592,0.6319])#*10**-3 #values of tabelle1.txt [T] TODO Last digit WRONG !!!\n#holy shit!! my first guess was 0.632\n\nE_10=MuAndLamb(\"10\",B_list[0],0.00205)\nE_12=MuAndLamb(\"12\",B_list[1],0.00124)\nE_14=MuAndLamb(\"14\",B_list[2],0.1) #TODO Error still wrong\ndef fitfunction(x,a): #To Fit mu_B\n return a*x\n\nE_list=[E_10,E_12,E_14]\n#print(E_list)\nplt.scatter(B_list,E_list)\npopt, pcov= curve_fit(fitfunction,B_list,E_list)\n#plt.plot(B_list,fitfunction(B_list,popt))\nprint(\"The magnetic moment is:\",popt)\nprint(\"Error:\",np.sqrt(pcov)) #TODO is pcov the right error?\n\n","sub_path":"f44/piplot.py","file_name":"piplot.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"83033410","text":"from django.urls import path\nfrom . 
import views\n\n\n# TEMPLATE URLS!!\napp_name = 'polls'\n\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('<int:pk>/',views.DetailView.as_view(), name='detail'),\n path('<int:pk>/results/',views.ResultsView.as_view(),name='results'),\n path('<int:pk>/vote/', views.vote, name='vote'),\n \n\n path('create/',views.QuestionsCreateView.as_view(),name='create'),\n path('createChoice/',views.ChoiceCreateView.as_view(),name='createChoice'),\n path('createChoice/',views.DetailView.as_view(),name='detail'),\n path('update/<int:pk>/',views.QuestionsUpdateView.as_view(),name='update'),\n path('delete/<int:pk>/',views.QuestionsDeleteView.as_view(),name='delete'), \n]","sub_path":"polls_project/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"59545931","text":"'''\nThis file will get automatically copied into the solutions zipfile (all over the place)\n'''\n\n\nimport os, glob\nimport SCons\nimport subprocess\nimport hashlib\n\nin_repo = 'isy-academy' in os.getcwd()\nif in_repo:\n import course_helpers\n\n\ndef run(env, command, arguments='', outfiles=None):\n abs_command = os.path.abspath(command)\n chmod = env.AlwaysBuild(env.Alias('chmod'+abs_command, [], 'chmod +x ' + abs_command)) \n\n if outfiles is None:\n outfiles = [] # not doing this as a default parameter, because we want a fresh list every time this function is called\n elif str == type(outfiles):\n outfiles = [outfiles]\n\n if not outfiles: # if there are no other files, hack something with listing *.pyc as output\n # this seems a good trigger if there aren't other output files to this command\n # though SCons seems to crash if you already have output files, to be looked into ..\n outfiles.append(abs_command+'c') \n\n fullcommand = \" \".join((abs_command, arguments))\n\n x = env.Alias('run', env.Command(outfiles, [abs_command, chmod], fullcommand))\n env.Clean(x, outfiles)\n return x\n\n\n\ndef run_with_automatic_redirect(env, command, params):\n '''\n A helper function for running the same program with different arguments\n SCons would normally prevent this, as they all have the same input files,\n and as far as it can tell, they're just aliases. 
(Would seem that SCons doesn't look at the actual command of the alias)\n\n But by adding an artificial output file (using tee) SCons will just run like we expect\n '''\n params = params + ' | tee ' # hacking tee in there, works in combination with outfile below\n outfile = os.path.abspath('.temp'+ hashlib.sha224(command+params).hexdigest()) # short unique output file\n return run(env, command, params+outfile, outfiles=[outfile])\n\n\n\n","sub_path":"python fundamentals/PythonFundamentals_exercises_solutions/exceptions/python_exercise_helpers.py","file_name":"python_exercise_helpers.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"432027400","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nMINS_PER_DAY = 1440\n\ndef plot_io_curves(io_series, filepath, min_intervals,toll_series = None,implement_tolls=False):\n indices = np.arange(0, MINS_PER_DAY, min_intervals)\n fig, ax1 = plt.subplots()\n for (input_series, output_series) in io_series:\n ax1.plot(indices, input_series,label='Before toll')\n ax1.plot(indices, output_series,label='After toll')\n if implement_tolls:\n ax2 = ax1.twinx()\n ax2.plot(indices, toll_series, '-r')\n ax2.set_ylabel('Toll values ($)', color='r')\n ax1.legend(loc=4)\n ax1.set_ylabel('Cumulative number of vehicles')\n ax1.set_xlabel('Time from midnight (mins)')\n if not os.path.exists(os.path.dirname(filepath)):\n os.makedirs(os.path.dirname(filepath))\n plt.savefig(filepath)\n plt.close()\n\ndef plot_demand_congestion(demands, congestion, filepath, congestion_spillover = None, min_intervals = 5, num_bins = 288):\n indices = np.arange(0, num_bins*min_intervals - min_intervals, min_intervals)\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n if congestion_spillover is not None:\n ax2.plot(indices, congestion, 'b-',label='Zone c')\n ax2.plot(indices, congestion_spillover, 'r-', label='Zone a')\n ax2.legend()\n ax2.set_ylim([0,120])\n else:\n demand_colors = ['y-', 'g-', 'r-']\n for i in range(len(demands)):\n ax1.plot(indices, demands[i], demand_colors[i])\n ax2.plot(indices, congestion, 'b-')\n ax1.set_ylabel('Demand (num/min)')\n ax1.set_xlabel('Time from midnight (mins)')\n ax2.set_ylabel('Congestion (accumulation/output) (min)', color='b')\n if not os.path.exists(os.path.dirname(filepath)):\n os.makedirs(os.path.dirname(filepath))\n plt.savefig(filepath)\n plt.close()","sub_path":"src/main/python/model_test/plot_curves.py","file_name":"plot_curves.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"3334591","text":"\"\"\"\r\n\n\nGiven a string containing digits from `2-9` inclusive, return all possible\nletter combinations that the number could represent. A mapping of digit to\nletters (just like on the telephone buttons) is given below. 
Note that 1 does\nnot map to any letters.\n\n![Alternative Text](https://edabit-challenges.s3.amazonaws.com/200px-\nTelephone-keypad2.svg.png)\n\n### Examples\n\n letter_combinations(\"23\") ➞ [\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"]\n \n letter_combinations(\"532\") ➞ [\"jda\", \"jdb\", \"jdc\", \"jea\", \"jeb\", \"jec\", \"jfa\", \"jfb\", \"jfc\", \"kda\", \"kdb\", \"kdc\", \"kea\", \"keb\", \"kec\", \"kfa\", \"kfb\", \"kfc\", \"lda\", \"ldb\", \"ldc\", \"lea\", \"leb\", \"lec\", \"lfa\", \"lfb\", \"lfc\"]\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef letter_combinations(digits):\n p=['','','abc','def','ghi','jkl','mno','pqrs','tuv','wxyz']\n l,comb=[''],lambda m,n:[i+j for i in m for j in n]\n for d in digits: l=comb(l,p[int(d)])\n return l\n\n","sub_path":"xEGFoPmMm28h7HQ7a_17.py","file_name":"xEGFoPmMm28h7HQ7a_17.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"28146377","text":"import json\n\nclass Config:\n\n\tdef isIgnored(self, database):\n\n\t\twith open('dbignore.json', 'r') as f:\n\t\t\tread_data = f.read()\n\n\t\tdbs = json.loads(read_data)\n\t\tread_data = None\n\n\t\tif database in dbs:\n\t\t\tdbs = None\n\t\t\treturn True\n\t\telse:\n\t\t\tdbs = None\n\t\t\treturn False\n\n","sub_path":"lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"347796707","text":"###################################################################################################\n#\n# test_halo_concentration.py (c) Benedikt Diemer\n# \t\t\t\t \t benedikt.diemer@cfa.harvard.edu\n#\n###################################################################################################\n\nimport unittest\nimport numpy as np\n\nfrom colossus.tests import test_colossus\nfrom colossus.cosmology import cosmology\nfrom colossus.halo import concentration\n\n###################################################################################################\n# TEST CASES\n###################################################################################################\n\nclass TCConcentration(test_colossus.ColosssusTestCase):\n\n\tdef setUp(self):\n\n\t\tpass\n\n\t###############################################################################################\n\n\tdef test_model_returns(self):\n\n\t\tcosmology.setCosmology('bolshoi', {'persistence': ''})\n\t\tM_one = 1E12\n\t\tM_one_array = np.array([1E12])\n\t\tM_many = np.array([1E10, 1E12, 1E15])\n\t\tN_array = len(M_many)\n\t\tmdefs = ['200c', 'vir', '200m', '345m']\n\t\tz = 0.0\n\t\tmodels = concentration.models\n\t\t\n\t\tfor k in models.keys():\n\t\t\tfor j in range(len(mdefs)):\n\n\t\t\t\tc, mask = concentration.concentration(M_one, mdefs[j], z = z, model = k, range_return = True, range_warning = False)\n\t\t\t\tself.assertNotIsInstance(c, np.ndarray, 'Concentration should be scalar float, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\tself.assertNotIsInstance(mask, np.ndarray, 'Mask should be scalar bool, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\n\t\t\t\tc, mask = concentration.concentration(M_one_array, mdefs[j], z = z, model = k, range_return = True, range_warning = False)\n\t\t\t\tself.assertIsInstance(c, np.ndarray, 'Concentration should be an array with one element, model %s, mdef %s.' 
% (k, mdefs[j]))\n\t\t\t\tself.assertIsInstance(mask, np.ndarray, 'Mask should be an array with one element, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\tself.assertEqual(len(c), 1, 'Concentration should be an array with one element, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\tself.assertEqual(len(mask), 1, 'Mask should be an array with one element, model %s, mdef %s.' % (k, mdefs[j]))\n\n\t\t\t\tc, mask = concentration.concentration(M_many, mdefs[j], z = z, model = k, range_return = True, range_warning = False)\n\t\t\t\tself.assertIsInstance(c, np.ndarray, 'Concentration should be an array with multiple elements, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\tself.assertIsInstance(mask, np.ndarray, 'Mask should be an array with multiple elements, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\tself.assertEqual(len(c), N_array, 'Concentration should be an array with multiple elements, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\tself.assertEqual(len(mask), N_array, 'Mask should be an array with multiple elements, model %s, mdef %s.' % (k, mdefs[j]))\n\n\t\t\t\tc = concentration.concentration(M_one, mdefs[j], z = z, model = k, range_return = False, range_warning = False)\n\t\t\t\tself.assertNotIsInstance(c, np.ndarray, 'Concentration should be scalar float, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\n\t\t\t\tc = concentration.concentration(M_one_array, mdefs[j], z = z, model = k, range_return = False, range_warning = False)\n\t\t\t\tself.assertIsInstance(c, np.ndarray, 'Concentration should be an array with one element, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\tself.assertEqual(len(c), 1, 'Concentration should be an array with one element, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\t\n\t\t\t\tc = concentration.concentration(M_many, mdefs[j], z = z, model = k, range_return = False, range_warning = False)\n\t\t\t\tself.assertIsInstance(c, np.ndarray, 'Concentration should be an array with multiple elements, model %s, mdef %s.' % (k, mdefs[j]))\n\t\t\t\tself.assertEqual(len(c), N_array, 'Concentration should be an array with multiple elements, model %s, mdef %s.' 
% (k, mdefs[j]))\n\n\t###############################################################################################\n\n\tdef test_model_values(self):\n\t\tcosmology.setCosmology('bolshoi', {'persistence': ''})\n\t\tM = 1E12\n\t\tz = 0.5\n\t\tmdef = '257m'\n\t\tmodels = concentration.models\n\t\tfor k in models.keys():\n\t\t\tmsg = 'Failure in model = %s' % (k)\n\t\t\tc = concentration.concentration(M, mdef, z = z, model = k, range_return = False, \n\t\t\t\t\t\t\t\t\t\trange_warning = False)\n\t\t\tif k == 'bullock01':\n\t\t\t\tself.assertAlmostEqual(c, 6.835529245640e+00, msg = msg)\n\t\t\telif k == 'duffy08':\n\t\t\t\tself.assertAlmostEqual(c, 5.844133619011e+00, msg = msg)\n\t\t\telif k == 'klypin11':\n\t\t\t\tself.assertAlmostEqual(c, 9.328978854869e+00, msg = msg)\n\t\t\telif k == 'prada12':\n\t\t\t\tself.assertAlmostEqual(c, 7.553796065867e+00, msg = msg)\n\t\t\telif k == 'bhattacharya13':\n\t\t\t\tself.assertAlmostEqual(c, 5.862359898743e+00, msg = msg)\n\t\t\telif k == 'dutton14':\n\t\t\t\tself.assertAlmostEqual(c, 7.590718273862e+00, msg = msg)\n\t\t\telif k == 'diemer15':\n\t\t\t\tself.assertAlmostEqual(c, 6.818549860585e+00, msg = msg)\n\t\t\telif k == 'diemer15_orig':\n\t\t\t\tself.assertAlmostEqual(c, 6.650400105982e+00, msg = msg)\n\t\t\telif k == 'klypin16_nu':\n\t\t\t\tself.assertAlmostEqual(c, 6.458910543577e+00, msg = msg)\n\t\t\telif k == 'klypin16_m':\n\t\t\t\tself.assertAlmostEqual(c, 6.210791674864e+00, msg = msg)\n\t\t\telif k == 'ludlow16':\n\t\t\t\tself.assertAlmostEqual(c, 7.623113634479e+00, msg = msg)\n\t\t\telif k == 'child18':\n\t\t\t\tself.assertAlmostEqual(c, 6.695361703210e+00, msg = msg)\n\t\t\telif k == 'diemer19':\n\t\t\t\tself.assertAlmostEqual(c, 6.799845154993e+00, msg = msg)\n\t\t\telse:\n\t\t\t\tmsg = 'Unknown model, %s.' % k\n\t\t\t\traise Exception(msg)\n\n\t###############################################################################################\n\t\n\tdef test_hard_fail(self):\n\t\tc = concentration.concentration(1E16, 'vir', z = 0.0, model = 'bullock01', range_return = False, range_warning = False)\n\t\tself.assertEqual(c, concentration.INVALID_CONCENTRATION)\n\n\t###############################################################################################\n\n\t# If interpolation = False, the slope is computed \"manually\" in the concentration routine. 
This \n\t# function tests how different the result is from the derivative function in the Cosmology module.\n\n\tdef test_PkSlopeComputation(self):\n\t\t\n\t\tM = 1E1\n\t\tz = 30.0\n\t\tcosmo = cosmology.setCosmology('bolshoi', {'persistence': ''})\n\t\tk_R = concentration._diemer15_k_R(M)\n\t\tcosmo.interpolation = True\n\t\tn1 = concentration._diemer15_n(k_R)\n\t\tc1 = concentration.modelDiemer15fromM(M, z, statistic = 'median')\n\t\tcosmo.interpolation = False\n\t\tn2 = concentration._diemer15_n(k_R)\n\t\tc2 = concentration.modelDiemer15fromM(M, z, statistic = 'median')\n\t\terr1 = abs(n2 / n1 - 1.0)\n\t\terr2 = abs(c2 / c1 - 1.0)\n\t\tself.assertLess(err1, 1E-3)\n\t\tself.assertLess(err2, 1E-3)\n\t\t\t\n###################################################################################################\n# TRIGGER\n###################################################################################################\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"simulations/Colossus/colossus/tests/test_halo_concentration.py","file_name":"test_halo_concentration.py","file_ext":"py","file_size_in_byte":6889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"587737850","text":"# authors: TFTAuthors\nimport tensorflow as tf\nfrom keras.applications import densenet\nimport matplotlib.pyplot as plt\nfrom keras.preprocessing import image\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom cleverhans import utils_tf\nfrom PIL import Image\n\n# User imports our library\nfrom tft.robustness_image import AdversarialInputs, AdversarialPatches\nfrom tft.generalization_image import FourierRadialFiltering,ObjectRecognitionWeakSignals, RotationTranslation\nfrom tft.interpretability_image import ModelInterpretation\nfrom tft.performance_image.performance_metrics import PerformanceMetrics\n\nimport os\n\nclass Model:\n \"\"\"\n Define a Model to be loaded by the user.\n \"\"\"\n\n def __init__(self, modelpath, model_file):\n\n self.image_size_height = 224\n self.image_size_width = 224\n self.num_channels = 3\n self.sess, self.model, self.logits = self.load_model(modelpath, model_file)\n self.y = tf.placeholder(tf.float32, shape=(None, 1000))\n self.loss = utils_tf.model_loss(self.y, self.model.outputs[0], mean=False)\n self.x = self.model.inputs[0]\n\n def preprocess_input(self, im):\n im2 = im.astype(np.float32)\n im2 /= 127.5\n im2 -= 1\n return im2\n \n def rev_preprocess(self,im):\n im += 1\n im *= 127.5\n return im\n\n def pre_process(self, imgs):\n images_pre_processed = []\n for a_image in imgs:\n # a_image is of shape (dim1, dim2, channels)\n a_image = np.expand_dims(a_image, axis=0) # line reshapes to (1, dim1, dim2, channels)\n images_pre_processed.append(\n self.preprocess_input(a_image)) # Preprocess the input image as per inception_v3\n\n return np.vstack(images_pre_processed) # Stack the images one below the other (rows)\n\n def predict(self, images): # images will have values from 0 - 255\n \"\"\"\n param images: a list of image tensors of shape (nSamples, H, W, C) ; \n\t\twhere H represents height, W represents width and C represents channels of an image respectively\n \"\"\"\n # probabilities = tf.nn.softmax(self.logits)\n probs = self.model.outputs[0]\n inputs = self.model.inputs[0]\n # print(images.shape)\n pre_processed_images = self.pre_process(images)\n # print(pre_processed_images.shape)\n result = self.sess.run(probs, {inputs: pre_processed_images})\n 
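# note: 'result' has shape (nSamples, 1000) -- softmax probabilities over the 1000 classes;\n # a caller could take np.argmax(result, axis=1) to recover the predicted class indices.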
#print(\"Result\",result)\n return result\n\n# def load_model(self):\n# print(\"Inside load_model\")\n# sess = tf.Session(graph=K.get_session().graph)\n# K.set_session(sess)\n# model = densenet.DenseNet121(weights='imagenet',classes=1000)\n# logits = model.outputs[0].op.inputs[0]\n# return sess, model, logits\n\n def load_model(self, h5modelpath, model_file_name):\n\n sess = tf.Session(graph=K.get_session().graph)\n K.set_session(sess)\n model = densenet.DenseNet121(weights='imagenet',classes=1000) # loads both architecture & weights\n model.save(h5modelpath, model_file_name) # saving the model (graph + weights) in a single .h5 file\n logits = model.outputs[0].op.inputs[0]\n \n print(\"Model loaded..\")\n return sess, model, logits\n\n\nPATH_TO_THE_MODEL = r'' # Absolute path to model\nIMAGE_SAMPLES_FOLDER = r'' # Absolute path to folder where test data/Image samples are present\nIMAGE_VS_LABELS_CSV = r'' # Absolute path to image-label csv\nPATH_TO_SAVE_RESULTS = r'' # Absolute path to save results - must be \\teachntest\\assets\\results\nPATH_TO_JSON_INDEX_CLASS_MAPPING = r'' # a local absolute path to a .json file that contains the index-class mapping\nPROJECT_NAME = r'' # A string which represents a name under which an test/test is performed\nmodel_file_name = r'' # A string which is the file name along with the extension of the model under test. A .h5 file name in this case.\n\nmodel = Model(PATH_TO_THE_MODEL, model_file_name)\n\n# print (\"Input_Placeholder-->\", model.model.inputs[0])\n# print (\"Logits-->\", model.logits)\n\ntest0 = AdversarialPatches( model,IMAGE_SAMPLES_FOLDER, IMAGE_VS_LABELS_CSV, PATH_TO_SAVE_RESULTS, PROJECT_NAME,\n PATH_TO_JSON_INDEX_CLASS_MAPPING, 1000, PATH_TO_THE_MODEL, model_file_name,'input_1:0',\n 'fc1000/BiasAdd:0', 16, (-1,1), 'rectangle', 4, learning_rate=5.0) \nprint(\"\\n\\n results are :\\n\\n\",test0.run()) \n\ntest1 = AdversarialInputs(model, IMAGE_SAMPLES_FOLDER, IMAGE_VS_LABELS_CSV, PATH_TO_SAVE_RESULTS, PROJECT_NAME,PATH_TO_JSON_INDEX_CLASS_MAPPING, threshold=0.1)\nresult = test1.fgsm(0.015) # epsilon value\nprint(result)\n\nresult = test1.cw() # Carlini & Wagner Method: can take optional learning rate & number of iterations\nprint(result)\n\n# test2 = FourierRadialFiltering(model, IMAGE_SAMPLES_FOLDER, IMAGE_VS_LABELS_CSV, PATH_TO_SAVE_RESULTS,PATH_TO_JSON_INDEX_CLASS_MAPPING, radius=0.4,threshold=0.1)\n# result = test2.run()\n# print(result)\n\n# test3 = ModelInterpretation(model, IMAGE_SAMPLES_FOLDER, IMAGE_VS_LABELS_CSV, PATH_TO_SAVE_RESULTS,PATH_TO_JSON_INDEX_CLASS_MAPPING, num_features=5, num_samples=200, hide_rest=True)\n# result = test3.run()\n# print(result)\n\n# test4 = ObjectRecognitionWeakSignals(model, IMAGE_SAMPLES_FOLDER, IMAGE_VS_LABELS_CSV, PATH_TO_SAVE_RESULTS,PATH_TO_JSON_INDEX_CLASS_MAPPING, threshold=0.1)\n# result = test4.generate_gray_scale()\n# print(result)\n\n# result = test4.generate_low_contrast(contrast_level_1=0.6)\n# print(result)\n\n# result = test4.generate_noisy(noise_width=0.1, contrast_level_2=0.3)\n# print(result)\n\n\"\"\"\n- reach: float, controlling the strength of the manipulation\n- coherence: a float within [0, 1] with 1 = full coherence\n- grain: float, controlling how fine-grained the distortion is\n\"\"\"\n# result = test4.generate_eidolon(grain=10.0, coherence=1.0, reach=2.0)\n# print(result)\n\n# test5 = RotationTranslation(model, IMAGE_SAMPLES_FOLDER, IMAGE_VS_LABELS_CSV, PATH_TO_SAVE_RESULTS,PATH_TO_JSON_INDEX_CLASS_MAPPING, threshold=0.1)\n# result = test5.run()\n# print(result)\n\n# 
PerformanceMetrics\npm = PerformanceMetrics(model, \"input_1:0\", PATH_TO_THE_MODEL, model_file_name, True, PATH_TO_SAVE_RESULTS, PROJECT_NAME, IMAGE_SAMPLES_FOLDER, IMAGE_VS_LABELS_CSV, model.image_size_height, model.image_size_width)\npm.compute()","sub_path":"userModel-densenet.py","file_name":"userModel-densenet.py","file_ext":"py","file_size_in_byte":6383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"461618910","text":"import unittest\nimport sys\nfrom src.Bullet import Bullet \n\nclass BulletTest(unittest.TestCase):\n\n def test_bullet_should_be_constructed_from_length(self):\n bullets = Bullet.listFromLength(10);\n self.assertEqual(10, len(bullets));\n\n def test_bullet_should_drop_left(self):\n bullets = Bullet.listFromLength(10);\n bullet = bullets[0]\n bullet = bullet.dropLeft();\n bullet = bullet.dropLeft();\n self.assertEqual(-1,bullet.getPosition())\n\n def test_bullet_should_drop_right(self):\n bullets = Bullet.listFromLength(10);\n bullet = bullets[0]\n bullet = bullet.dropRight();\n self.assertEqual(0.5,bullet.getPosition())\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/BulletTest.py","file_name":"BulletTest.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"619145518","text":"#!usr/bin/python3\r\n\r\n#link list generation submodule\r\n\r\nimport selenium\r\nfrom selenium import webdriver\r\n#from selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.firefox.options import Options\r\n\r\n#set up headless driver options\r\noptions = Options()\r\noptions.headless = True\r\n#$options.executable_path = '/usr/bin/chromedriver'\r\n\r\ndownload_queue = []\r\n\r\ndef build_download_queue_recursive(seed_link_list, recursion_depth, recursion_width):\r\n \r\n global download_queue\r\n \r\n download_queue = []\r\n print(\" === Building the Download Queue. This may take a while . . .\")\r\n recursive_grab_from_list(seed_link_list, recursion_depth, recursion_width)\r\n # print(\" === The Download Queue is now built. 
Total: \" + str(len(download_queue)) + \" videos will be gotten.\")\r\n print(\" === The Download Queue is now built: \" + str(download_queue))\r\n return download_queue\r\n \r\ndef add_to_download_queue(list_to_add):\r\n\r\n global download_queue\r\n\r\n #clean out items that are already downloaded\r\n f = open('logs.txt', 'r')\r\n already_downloaded = f.read().splitlines()\r\n f.close()\r\n \r\n print (\" === [l] Cleaning download_queue\")\r\n \r\n for elem in list_to_add:\r\n if elem in already_downloaded:\r\n print (\" ====== removing \" + elem)\r\n list_to_add.remove(elem)\r\n \r\n #just to be safe I list(set()) this here, though list_to_add should be safe.\r\n download_queue = list(set(download_queue + list_to_add))\r\n \r\n #print(\" === add_to_download_queue() ended with queue: \" + str(download_queue))\r\n\r\n\r\ndef get_next_seed_link_list (current_link, recursion_width):\r\n\r\n global options\r\n\r\n #open a webdriver using hthe chrome webdriver\r\n driver = webdriver.Firefox(options=options)\r\n driver.get(current_link)\r\n\r\n #get all elements that would be referenced by the css code .videoblock{...}\r\n element_list = driver.find_elements_by_css_selector('.videoblock')\r\n\r\n #listcomprehension to get all those _vkey (the media key that is needed to get the URL)\r\n vkey_list = [x.get_attribute('_vkey') for x in element_list]\r\n \r\n next_link_list = []\r\n\r\n for n in range (0, recursion_width): \r\n if (n+4) < len(vkey_list):\r\n next_link_list.append(\"https://www.pornhub.com/view_video.php?viewkey=\" + vkey_list[n+4])\r\n else:\r\n print(\" =e= Error. Skipping\")\r\n\r\n #quit, not close, as quit is better at cleaning up the terminal windows, and preventing segfaultybois\r\n driver.quit()\r\n return next_link_list\r\n\r\n\r\ndef recursive_grab_from_list(seed_link_list, recursion_depth, recursion_width):\r\n\r\n if recursion_depth == 1:\r\n\r\n #add seed_link_list to the download queue\r\n add_to_download_queue(seed_link_list)\r\n return 1\r\n\r\n for seed_link in seed_link_list:\r\n\r\n templist = ['']\r\n templist[0] = seed_link\r\n\r\n #add seed_link (aka templist) to the download queue\r\n add_to_download_queue(templist)\r\n\r\n #generate next seed_link_list\r\n next_seed_link_list = get_next_seed_link_list(seed_link, recursion_width)\r\n\r\n #recurse with depth - 1\r\n recursive_grab_from_list(next_seed_link_list, recursion_depth-1, recursion_width)\r\n\r\n","sub_path":"Linux/linklist_gen.py","file_name":"linklist_gen.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"272092941","text":"# loading the package nltk, load it for this file.\nimport nltk\n\ndef open_file_and_get_text(filename):\n # open the file, as a read-only.\n with open(filename, 'r') as our_file:\n # takes the file and reads it.\n text = our_file.read()\n return text\n\ndef clean_tokens(words):\n # creates an empty list\n clean_words = []\n for word in words:\n # fills with lowercased words\n clean_words.append(word.lower())\n return clean_words\n\n\n# variable 'filename' for where our file is.\nfilename = \"eyre.txt\"\n\n# print the first 99 characters of the text.\n# print(text[0:100])\n\ntext = open_file_and_get_text(filename)\n\n\n# take long string and break into words.\nwords = nltk.word_tokenize(text)\n# print(words[0:10])\n\nclean_words = clean_tokens(words)\nword_counts = 
nltk.FreqDist(clean_words)\nprint(word_counts.most_common(10))\nprint(word_counts['jane'])\nnltk.Text(clean_words).dispersion_plot(['he','she','jane','tony'])\n","sub_path":"python/eyre.py","file_name":"eyre.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"465342639","text":"#!/usr/bin/env python\n\nimport sys\n\ncount=0\ncurrentkey=None\nfor line in sys.stdin:\n\tline=line.strip()\n\tkey,value=line.split('\\t')\n\t#print(words[0])\n\tif currentkey==key:\n\t\tcount=count+1\n\telse:\n\t\tif currentkey:\n\t\t\tprint (\"{0}\\t{1}\".format(currentkey,count))\n\t\tcurrentkey=key\n\t\tcount=1\nif currentkey==key:\n\tprint (\"{0}\\t{1}\".format(currentkey,count))\n","sub_path":"task4/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"252134878","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: HuRuiFeng\n@file: vm.py\n@time: 2020/8/19 21:28\n@project: wasm-python-book\n@desc:\n\"\"\"\nfrom ch08.binary.module import Module\nfrom ch08.binary.opcodes import Call\nfrom ch08.interpreter.instr_control import call\nfrom ch08.interpreter.instructions import instr_table\nfrom ch08.interpreter.vm_global import GlobalVar\nfrom ch08.interpreter.vm_memory import Memory\nfrom ch08.interpreter.vm_stack_control import ControlStack, ControlFrame\nfrom ch08.interpreter.vm_stack_operand import OperandStack\n\n\nclass VM(OperandStack, ControlStack):\n def __init__(self, module=None, memory=None):\n super(VM, self).__init__()\n if module is None:\n module = Module()\n self.module = module\n self.memory = memory\n # holds the global variables of the module instance\n self.globals = []\n # records the index of the current function's first local variable\n self.local_0_idx = None\n self.frames = []\n self.slots = []\n\n def init_mem(self):\n \"\"\"\n Memory initialization.\n A Wasm module can import or define a memory; a dedicated data section holds the memory initialization data.\n \"\"\"\n # if the module defines a memory, create the memory instance first and allocate the required pages\n if len(self.module.mem_sec) > 0:\n self.memory = Memory(self.module.mem_sec[0])\n\n for data in self.module.data_sec:\n for instr in data.offset:\n self.exec_instr(instr)\n\n # once the offset instructions have run, the memory start address is left on top of the operand stack\n self.memory.write(self.pop_u64(), data.init)\n\n def init_globals(self):\n for global_var in self.module.global_sec:\n for instr in global_var.init:\n self.exec_instr(instr)\n self.globals.append(GlobalVar(global_var.type, self.pop_u64()))\n\n def enter_block(self, opcode, bt, instrs):\n \"\"\"\n Used when entering a function (or a control block)\n \"\"\"\n bp = self.stack_size - len(bt.param_types)\n cf = ControlFrame(opcode, bt, instrs, bp)\n self.push_control_frame(cf)\n if opcode == Call:\n self.local_0_idx = int(bp)\n\n def exit_block(self):\n cf = self.pop_control_frame()\n self.clear_block(cf)\n\n def clear_block(self, cf):\n results = self.pop_u64s(len(cf.bt.result_types))\n self.pop_u64s(self.stack_size - cf.bp)\n self.push_u64s(results)\n if cf.opcode == Call and self.control_depth > 0:\n last_call_frame, _ = self.top_call_frame()\n self.local_0_idx = int(last_call_frame.bp)\n\n def reset_block(self, cf):\n results = self.pop_u64s(len(cf.bt.param_types))\n self.pop_u64s(self.stack_size - cf.bp)\n self.push_u64s(results)\n\n def exec_code(self, idx):\n \"\"\"Execute the function's instructions one by one\"\"\"\n code = self.module.code_sec[idx]\n for instr in code.expr:\n self.exec_instr(instr)\n\n def exec_instr(self, instr):\n \"\"\"Instruction dispatch logic: table lookup\"\"\"\n instr_table[instr.opcode](self, instr.args)\n\n def loop(self, verbose_flag):\n depth = self.control_depth\n while 
self.control_depth >= depth:\n cf = self.top_control_frame\n if cf.pc == len(cf.instrs):\n self.exit_block()\n else:\n instr = cf.instrs[cf.pc]\n if verbose_flag:\n print(\"PC={}, opcode={}, instrs={}, slots={}\".format(cf.pc, instr.opcode,\n instr_table[instr.opcode].__name__,\n self.slots))\n cf.pc += 1\n self.exec_instr(instr)\n\n\ndef exec_main_func(module, verbose_flag=False):\n vm = VM(module)\n vm.init_mem()\n vm.init_globals()\n call(vm, module.start_sec)\n vm.loop(verbose_flag)\n","sub_path":"src/ch08/interpreter/vm.py","file_name":"vm.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"401352634","text":"import numpy\n\n\nclass Solution:\n \"\"\"\n https://www.acwing.com/solution/leetcode/content/2366/\n O(n^3), however TLE\n \"\"\"\n def numSubmatrixSumTarget(self, matrix: list, target: int) -> int:\n matrix = numpy.array(matrix)\n res = 0\n for i in range(len(matrix)):\n for i_ in range(i+1, len(matrix)+1):\n sub_prefix = matrix[i:i_].sum(0)\n prefix_hash = {0: 1}\n for j in range(len(sub_prefix)):\n prefix_sum = sub_prefix[0:j+1].sum()\n sub_target = prefix_sum - target\n if sub_target in prefix_hash:\n res += prefix_hash[sub_target]\n if prefix_sum in prefix_hash:\n prefix_hash[prefix_sum] += 1\n else:\n prefix_hash[prefix_sum] = 1\n return res\n\n\nif __name__ == '__main__':\n solution = Solution()\n test_cases = [\n ([[0,1,0],[1,1,1],[0,1,0]], 0),\n ([[1,-1],[-1,1]], 0)\n ]\n\n for mat, target in test_cases:\n print(solution.numSubmatrixSumTarget(mat, target))\n","sub_path":"1074_Number_of_Submatrices_That_Sum_to_Target/solution_2.py","file_name":"solution_2.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"18884285","text":"from flask import Blueprint, request, jsonify\nfrom santas import SantaDAO\nfrom postgres import PostgresInjection\n\n__author__ = 'debal'\n\napi = Blueprint('staysanta-api', __name__)\n\npostgresql = PostgresInjection()\n\n\n@api.route('/', methods=['POST'])\ndef add_santa():\n santa_type = request.args\n santa_dao = SantaDAO(postgresql)\n santa_name = request.form[\"name\"]\n santa_index = request.form[\"index\"]\n santa_address = request.form[\"address\"]\n santa_email = request.form[\"email\"]\n\n santa_dict = {'name': santa_name, 'index': santa_index, 'address': santa_address, 'email': santa_email}\n santa_dao.put_santa(santa_dict)\n return jsonify(santa_dict), 200\n\n\n@api.route('/stats', methods=['GET'])\ndef get_stats():\n santa_dao = SantaDAO(postgresql)\n stats = santa_dao.get_stats()\n return jsonify(stats), 200","sub_path":"flask/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"634466538","text":"import time\n\nfrom own import percentage\nfrom own import print_time\n\n\n@print_time\ndef _test1():\n print('work is running')\n\n\ndef _test2():\n for i in range(100):\n percentage(i + 1, 100)\n time.sleep(0.1)\n\n\nif __name__ == '__main__':\n # _test1()\n _test2()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"620617987","text":"from __future__ import print_function\nimport numpy as np\nimport sys\n\nnp.random.seed(1337) # for reproducibility\n\nfrom keras.models import 
Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D, GaussianNoise\nfrom keras.optimizers import SGD, Adam\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras.preprocessing.image import ImageDataGenerator\nimport numpy as np\nfrom keras.utils.np_utils import to_categorical\n\nWEIGHTS_FILE = \"mnist_cnn_weights.txt\"\nID = \"204785240.txt\"\n\nprint(WEIGHTS_FILE)\nbatch_size = 50\nnb_classes = 10\nnb_epoch = 500\n\nlr = 0.001\nprint(\"Batch Size:\" + str(batch_size))\nprint(\"Learning Rate: \" + str(lr))\n# input image dimensions\nimg_rows, img_cols = 28, 28\n# number of convolutional filters to use\nnb_filters = [64, 128, 256, 512]\nprint(\"Filters: \" + str(nb_filters))\n# size of pooling area for max pooling\nnb_pool = 2\n# convolution kernel size\nnb_conv = 3\n\n\ndef train_from_file():\n # the data, shuffled and split between train and test sets\n csv = np.genfromtxt('./data/train.txt', delimiter=\",\")\n X_train = csv[:, 1:785]\n\n Y_train = csv[:, 0]\n\n X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n\n print('X_train shape:', X_train.shape)\n print('Y_train shape:', Y_train.shape)\n print(X_train.shape[0], 'train samples')\n\n # np.save(\"TrainX\", X_train)\n # np.save(\"TrainY\", Y_train)\n\n return X_train, Y_train\n\n\n# def train_from_past():\n# X_train = np.load(\"TrainX.npy\")\n# Y_train = np.load(\"TrainY.npy\")\n# return X_train, Y_train\n\n\nX_train, Y_train = train_from_file()\n\nY_train = to_categorical(Y_train, nb_classes)\n\n\ndef val_from_file():\n csv = np.genfromtxt('./data/validate1.txt', delimiter=\",\")\n X_val1 = csv[:, 1:785]\n Y_val1 = csv[:, 0]\n\n X_val1 = X_val1.reshape(X_val1.shape[0], 1, img_rows, img_cols)\n\n # np.save(\"ValX1\", X_val1)\n # np.save(\"ValY1\", Y_val1)\n\n csv = np.genfromtxt('./data/validate2.txt', delimiter=\",\")\n X_val2 = csv[:, 1:785]\n Y_val2 = csv[:, 0]\n\n X_val2 = X_val2.reshape(X_val2.shape[0], 1, img_rows, img_cols)\n\n # np.save(\"ValX2\", X_val2)\n # np.save(\"ValY2\", Y_val2)\n return X_val1, Y_val1, X_val2, Y_val2\n\n\n# def val_from_past():\n# X_val1 = np.load(\"ValX1.npy\")\n# Y_val1 = np.load(\"ValY1.npy\")\n# X_val2 = np.load(\"ValX2.npy\")\n# Y_val2 = np.load(\"ValY2.npy\")\n# return X_val1, Y_val1, X_val2, Y_val2\n\n\nX_val1, Y_val1, X_val2, Y_val2 = val_from_file()\n\nY_val1 = to_categorical(Y_val1, nb_classes)\nY_val2 = to_categorical(Y_val2, nb_classes)\n\nX_train = np.concatenate((X_train, X_val1), axis=0)\nY_train = np.concatenate((Y_train, Y_val1), axis=0)\n\nX_train = np.concatenate((X_train, X_val2), axis=0)\nY_train = np.concatenate((Y_train, Y_val2), axis=0)\n\n\ndef Model(weights_path=None):\n model = Sequential()\n model.add(GaussianNoise(0.01, input_shape=(1, img_rows, img_cols)))\n model.add(Convolution2D(nb_filters[0], nb_conv, nb_conv,\n border_mode='valid'))\n model.add(Activation('relu'))\n\n model.add(Convolution2D(nb_filters[1], nb_conv, nb_conv))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n model.add(Dropout(0.25))\n\n model.add(Convolution2D(nb_filters[2], nb_conv, nb_conv))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n model.add(Dropout(0.25))\n\n model.add(Convolution2D(nb_filters[3], nb_conv, nb_conv))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128))\n 
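# classifier head: the Dense(128) above feeds ReLU -> Dropout(0.5) -> Dense(nb_classes) -> softmax below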
model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n\n if weights_path:\n model.load_weights(weights_path)\n\n return model\n\n\ndef predict_test(model):\n csv = np.genfromtxt('./data/test.txt', delimiter=\",\")\n X_val = csv[:, 1:785]\n Y_val = csv[:, 0]\n\n X_val = X_val.reshape(X_val.shape[0], 1, img_rows, img_cols)\n\n predictions = model.predict_classes(X_val, verbose=True)\n # wrting output to file\n text_file = open(\"./\" + ID, \"w\")\n\n for p in predictions:\n text_file.write(str(p) + \"\\n\")\n\n text_file.close()\n\n\ndef main():\n model = Model()\n if (sys.argv[1] == \"test\"):\n global nb_epoch\n nb_epoch = 0\n global WEIGHTS_FILE\n WEIGHTS_FILE = sys.argv[2]\n\n adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n model.compile(loss='categorical_crossentropy',\n optimizer=adam)\n\n datagen = ImageDataGenerator(\n featurewise_center=False,\n featurewise_std_normalization=False,\n rotation_range=15,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=False)\n\n datagen.fit(X_train)\n callbacks = [ModelCheckpoint(WEIGHTS_FILE, monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),\n EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')]\n model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),\n samples_per_epoch=len(X_train), nb_epoch=nb_epoch, validation_data=(X_val1, Y_val1),\n show_accuracy=True, callbacks=callbacks)\n\n model.load_weights(WEIGHTS_FILE)\n predict_test(model)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Ex1Sol/mnist_cnn.py","file_name":"mnist_cnn.py","file_ext":"py","file_size_in_byte":5455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"305621809","text":"import gym\nimport numpy as np\nimport itertools\nimport matplotlib.pyplot as plt\n\ndef policy(obs, sum = 20):\n return 0 if obs[0] >= sum else 1\n\ndef playEpisode(obs, env, done=False):\n state = obs[0]\n while not done:\n action = policy(obs)\n obs, reward, done, _ = env.step(action)\n\n return state, reward, obs[1], obs[2]\n\ndef plot_blackjack_values(V):\n\n def get_Z(x, y, usable_ace, runtime):\n if (x,y,usable_ace, runtime) in V:\n return V[x ,y ,usable_ace, runtime]\n else:\n return 0\n\n def get_figure(usable_ace, ax, xlim, runtime):\n \n x_range = np.arange(12, xlim)\n y_range = np.arange(1, 11)\n X, Y = np.meshgrid(x_range, y_range)\n Z = np.array([get_Z(x,y,usable_ace, runtime) for x,y in zip(np.ravel(X), np.ravel(Y))]).reshape(X.shape)\n\n surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.cm.coolwarm, vmin=-1.0, vmax=1.0)\n ax.set_zlim(-1,1)\n ax.set_ylim(1,10)\n ax.set_xlim(12,xlim-1)\n ax.set_xlabel('Player\\'s Current Sum')\n ax.set_ylabel('Dealer\\'s Showing Card')\n ax.set_zlabel('State Value')\n ax.view_init(ax.elev, -120)\n\n fig = plt.figure(figsize=(20, 20))\n\n ax = fig.add_subplot(221, projection='3d')\n ax.set_title('Usable Ace 10.000 episodes')\n get_figure(True, ax, 22, 10000)\n\n ax = fig.add_subplot(222, projection='3d')\n ax.set_title('No Usable Ace 10.000 episodes')\n get_figure(False, ax, 21, 10000)\n\n ax = fig.add_subplot(223, projection='3d')\n ax.set_title('Usable Ace 500.000 episodes')\n get_figure(True, ax, 22, 500000)\n\n ax = fig.add_subplot(224, projection='3d')\n ax.set_title('No Usable Ace 500.000 episodes')\n get_figure(False, ax, 21, 500000)\n plt.savefig('Ex2a.pdf', dpi=1000)\n plt.show()\n \ndef main():\n runtimes = [10000, 500000]\n 
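# Monte Carlo policy evaluation sketch: V(s) is approximated by the sample mean of\n # episode returns from each state (player_sum, dealer_card, usable_ace) under the\n # fixed policy defined in policy() above (\"stick on 20 or 21, otherwise hit\").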
states = [i for i in range(12,22)]\n dealer_states = [i for i in range(1,11)]\n states = list(itertools.product(states, dealer_states, [True, False], runtimes))\n returns = dict(zip(states, [[] for i in range(len(states))]))\n for runtime in runtimes:\n env = gym.make('Blackjack-v0')\n for _ in range(runtime):\n obs = env.reset() # obs is a tuple: (player_sum, dealer_card, useable_ace)\n state, reward, dealercard, usableAce = playEpisode(obs, env)\n state = 12 if state <= 12 else state\n returns[(state, dealercard, usableAce, runtime)].append(float(reward))\n V = {}\n for key, value in returns.items():\n if len(value) > 0:\n V[key] = (sum(value)/len(value))\n plot_blackjack_values(V)\n \n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"ex04-mc/ex04-mc.py","file_name":"ex04-mc.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"145034729","text":"import re\nimport requests\nfrom datetime import datetime, timedelta\nfrom bs4 import BeautifulSoup\nfrom multiprocessing import Pool\nfrom operator import itemgetter\nfrom database import create_movies_table, insert_to_movies_table\n\n# -----------------------------------------------\ndef round_rating(rating):\n if rating != None and rating != 0:\n rounded_rating = round(float(rating), 1)\n return rounded_rating\n else:\n return None\ndef num_thousand_space(num):\n\tif num != None:\n\t if len(num) != 1:\n\t if len(num) != 2:\n\t if len(num) > 2:\n\t num_r = round(int(num)/1000, 1)\n\t num_r = int(num_r*1000)\n\t else:\n\t num_r = round(int(num)/10)\n\t num_r = int(num_r*10)\n\t else:\n\t num_r = num\n\t return '{:,}'.format(int(num_r)).replace(',', ' ')\n\telse:\n\t\treturn None\ndef only_kp(data):\n if data.get('other') != None and data.get('other').get('kp_rating') != None:\n return float(data.get('other').get('kp_rating'))\n else:\n return float(0)\ndef only_imdb(data):\n if data.get('other') != None and data.get('other').get('imdb_rating') != None:\n return float(data.get('other').get('imdb_rating'))\n else:\n return float(0)\ndef get_string_index(index, string):\n\t# just\n return f'{index+1}. 
{string}'\n# =======================================================\ndef get_premiere_day(next_week=False):\n # today = datetime.utcnow() + timedelta(hours=6)\n today = datetime.utcnow() + timedelta(hours=2)\n if today.weekday() < 3:\n weeks_ex = 1\n else:\n weeks_ex = 0\n last_thursday = today - timedelta(days=today.weekday()-3, weeks = weeks_ex)\n if not next_week:\n days = []\n delta = today.date() - last_thursday.date()\n for i in range(delta.days + 1):\n day = last_thursday.date() + timedelta(days=i)\n days.append(day.strftime('%Y-%m-%d'))\n return tuple(days)\n else:\n next_thursday = last_thursday + timedelta(weeks=1)\n days = []\n delta = next_thursday.date() - today.date()\n for i in range(1, delta.days + 1):\n \tday = today.date() + timedelta(days=i)\n \tdays.append(day.strftime('%Y-%m-%d'))\n return tuple(days)\ndef get_rating(movie_id):\n url_of_rating = f'https://rating.kinopoisk.ru/{movie_id}.xml'\n r = requests.get(url_of_rating)\n soup = BeautifulSoup(r.text, 'lxml')\n \n try:\n kp_rating = soup.find('kp_rating').text\n kp_votes = soup.find('kp_rating')['num_vote']\n if kp_rating == '0':\n kp_rating = None\n kp_votes = None\n except:\n kp_rating = None\n kp_votes = None\n\n try:\n imdb_rating = soup.find('imdb_rating').text\n imdb_votes = soup.find('imdb_rating')['num_vote']\n except:\n imdb_rating = None\n imdb_votes = None\n\n kp_rating = round_rating(kp_rating)\n imdb_rating = round_rating(imdb_rating)\n\n kp_votes = num_thousand_space(kp_votes)\n imdb_votes = num_thousand_space(imdb_votes)\n\n ratings = {'kp_rating': kp_rating,\n 'kp_votes': kp_votes,\n 'imdb_rating': imdb_rating,\n 'imdb_votes': imdb_votes}\n\n return ratings\ndef get_poster(movie_id):\n\tif movie_id != None:\n\t\tposter_url = f'https://st.kp.yandex.net/images/film_iphone/iphone360_{movie_id}.jpg'\n\t\tstatus_code = requests.get(poster_url, allow_redirects = False).status_code\n\telse:\n\t\tstatus_code = None\n\tif status_code != None and status_code != 302:\n\t\treturn poster_url\n\telse:\n\t\treturn None\ndef get_kp_data(link_kp):\n\tif link_kp != None:\n\t\tmovie_id = re.search(r'\\d+', link_kp).group()\n\t\tlink_poster = get_poster(movie_id)\n\t\tratings = get_rating(movie_id)\n\t\tkp_data = {'link_kp': link_kp,\n\t\t\t\t 'poster': link_poster}\n\t\tkp_data.update(ratings)\n\t\treturn kp_data\n\telse:\n\t\treturn None\ndef get_data_inner_page(link_kz):\n\thtml = get_html(link_kz)\n\tsoup = BeautifulSoup(html, 'lxml')\n\ttry:\n\t\tlink = soup.find('table', {'class': 'infoTable'}).find('td', text='Ссылки').findNext('td').a\n\t\tif 'kinopoisk' in link.text:\n\t\t\tlink_kp = link.get('href')\n\t\telse:\n\t\t\tlink_kp = None\n\texcept:\n\t\tlink_kp = None\n\ttry:\n\t\tcountry_year = soup.find('table', {'class': 'infoTable'}).find('td', text='Производство').findNext('td').text.strip()\n\t\t*country, year = country_year.split(', ')\n\t\tif re.match(r'^\\d{4}$', year) == None:\n\t\t\tcountry = country_year.split(', ')\n\t\t\tyear = None\n\t\tcountry = ', '.join(country)\n\texcept:\n\t\tcountry, year = [None]*2\n\ttry:\n\t\tdirector = soup.find('table', {'class': 'staff-table'}).tbody.td.text.strip()\n\texcept:\n\t\tdirector = None\n\ttry:\n\t\tactors = soup.find('table', {'class': 'staff-table'}).findNext('table').tbody.find_all('td')\n\t\tactors = ', '.join([td.text.strip() for td in actors][:3])\n\texcept AttributeError:\n\t\tactors = None\n\tkp_data = get_kp_data(link_kp)\n\n\tresult = {'director': director,\n\t\t\t 'actors': actors,\n\t\t\t 'country': country,\n\t\t\t 'year': year,\n\t\t\t 'other': 
kp_data}\n\n\treturn result\ndef minimize_data(html_small):\n\thtml_small = BeautifulSoup(html_small, 'lxml')\n\ttry:\n\t\tdate_day = html_small.dl.dt.span.text\n\t\tif date_day[:2] == 'С ':\n\t\t\tdate_day = date_day[2:]\n\t\tdate_day_obj = datetime.strptime(date_day, '%d-%b-%Y').date()\n\texcept AttributeError as ex:\n\t\tdate_day_obj = None\n\n\thtml_small = html_small.find('dl').find_all('dt')\n\n\ttitle = html_small[1].text\n\tlink_kz = 'http://kino.kz' + html_small[1].a.get('href')\n\ttry:\n\t\tgenres = html_small[2].text\n\texcept IndexError:\n\t\tgenres = None\n\ttry:\n\t\tdescription = html_small[3].text\n\t\tif description == '':\n\t\t\tdescription = None\n\texcept IndexError:\n\t\tdescription = None\n\n\tdata_deep = get_data_inner_page(link_kz)\n\n\tresult = {'title': title,\n 'link_kz': link_kz,\n 'premiere_day': date_day_obj,\n 'genres': genres,\n 'description': description}\n\n\tresult.update(data_deep)\n\n\treturn result\n# -----------------------------------------------\ndef get_html(url):\n r = requests.get(url)\n return r.text\ndef get_data(html):\n soup = BeautifulSoup(html, 'lxml')\n htmls = soup.find('div', {'class': 'film-archive-list'}).find_all('div', {'class': 'movie_description'})\n return htmls\ndef get_list_of_minimized_data(htmls):\n\thtmls = [str(html) for html in htmls]\n\twith Pool() as p:\n\t\tlist_of_minimized_data = p.map(minimize_data, htmls)\n\treturn list(list_of_minimized_data)\ndef sort_data(data, soon=False):\n\tkp_ratings = map(only_kp, data)\n\timdb_ratings = map(only_imdb, data)\n\tif not soon:\n\t\tdata_sorted = [(d, (k, i)) for d, k, i in zip(data, kp_ratings, imdb_ratings)]\n\telse:\n\t\tdata_sorted = [(d, (i, k)) for d, i, k in zip(data, imdb_ratings, kp_ratings)]\n\tdata_sorted.sort(key=itemgetter(1), reverse=True)\n\treturn [item[0] for item in data_sorted]\n# -----------------------------------------------\ndef premieres(URL, soon=False):\n\thtml = get_html(URL)\n\thtmls = get_data(html)\n\tdata_list = get_list_of_minimized_data(htmls)\n\tdata_list_sorted = sort_data(data_list, soon)\n\treturn data_list_sorted\n\nif __name__ == '__main__':\n\tpass\n\t# # table_names = ['today', 'tomorrow', 'tomorrow2']\n\t# table_names = ['today']\n\t# create_movies_table(table_names)\n\t# for index, table in enumerate(table_names):\n\t# \tURL = f'http://kino.kz/?day={index}&lang=0'\n\t# \tdata = premieres(URL)\n\t# \tinsert_to_movies_table(table, data)\n\n\t# table_names = ['soon']\n\t# create_movies_table(table_names)\n\t# url = 'http://kino.kz/soon'\n\t# data = premieres(url, soon=True)\n\t# insert_to_movies_table(table_names[0], data)\n","sub_path":"new_version/premieres_parse_new.py","file_name":"premieres_parse_new.py","file_ext":"py","file_size_in_byte":7515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"133255065","text":"#!/usr/bin/python3\nfrom timeit import timeit\n\ndef magic():\n\n digits = []\n target = [10**x for x in range(0,7)]\n\n s = ''\n\n a, b, c = 1, 10**3, 10**6\n d = b\n\n l = 1\n\n while b <= c:\n for i in range(a,b):\n s += str(i)\n\n for n,i in enumerate(s):\n if n+l in target:\n digits.append(i)\n\n l += len(s)\n s = ''\n\n a = b\n b += d\n\n print(''.join(digits))\n\n x = 1\n for i in digits:\n x *= int(i)\n\n print(x)\n\nprint(timeit(magic,number=1))\n","sub_path":"40_champernownes_constant.py","file_name":"40_champernownes_constant.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
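# Aside on 40_champernownes_constant.py above: the chunked string build works, but each
# digit d(n) can also be indexed directly with block arithmetic. A minimal sketch (the
# helper name champernowne_digit is illustrative, not part of the record above):
def champernowne_digit(n):
    # digit blocks: 9 one-digit numbers, 90 two-digit, 900 three-digit, ...
    length, count, start = 1, 9, 1
    while n > length * count:
        n -= length * count  # skip this whole block of digits
        length += 1
        count *= 10
        start *= 10
    num = start + (n - 1) // length  # the number that contains digit n
    return int(str(num)[(n - 1) % length])

# multiplying champernowne_digit(10**k) for k in range(7) should reproduce the product
# printed by the script above.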
+{"seq_id":"467372968","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport glob\nimport re\nimport codecs\nimport sys\nimport jctconv\nimport tfidf\nfrom wanalyser import wanalyser\n\ndef imaginary_words(words, num):\n u\"\"\"隣り合う単語をnum個つなげた新しい単語を作る\"\"\"\n iws = []\n for i in range(0, len(words)-(num)+1):\n ws = words[i:num+i]\n iws.append(\"\".join(ws))\n return iws\n\ndef entry(filename, words):\n f = open(filename,\"a\")\n for keyword in words:\n yomi = jctconv.kata2hira(keyword[0].decode('utf-8')).encode('utf-8')\n if yomi is not None:\n line = keyword[0]+','+yomi+','+str(keyword[1])+'\\n'\n if line:f.write(line)\n f.close()\n\nprint('----- load words -----')\nmin_w = 1.5 #最小重み\nall_words = [] #解析されたすべての単語\ncontents = [] #単語リストを文章単位にまとめたリスト\nanalyser = wanalyser()\n\ndef get_contents(i):\n counter = 1\n allLines = open('../jawiki-latest/jawiki-latest-pages-articles.xml-{0:0>3}.txt'.format(i), 'r').read()\n lines = allLines.split('\\n\\n')\n print(len(lines))\n for line in lines:\n _ws = analyser.extract_keyword(line.decode('utf-8'), lambda feature: feature[0] == \"名詞\")\n words = analyser.filter(_ws, lambda name: \"HIRAGANA\" in name or \"KATAKANA\" in name)\n if len(words) == 0:\n continue\n iws = [x for x in imaginary_words(words, 2) if x in line]\n _ = [x for x in iws if x not in all_words]\n all_words.extend(_)\n print(counter, '/', len(lines))\n counter+=1\n yield iws\n\nparam = sys.argv\n\nfor i in range(100):\n n = int(param[1])+i\n contents = [item for item in get_contents(str(n))]\n\n #最小重み以上のものを抽出\n new_words = list(set([x for x in tfidf.tfidf(contents) if x[1] > min_w]))\n\n entry('ex1_.csv', new_words)\n","sub_path":"ex1/ex1_.py","file_name":"ex1_.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"550452154","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# $Id: __init__.py 1381 2012-06-27 13:57:01Z g.rodola $\n#\n# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"psutil is a module providing convenience functions for managing\nprocesses and gather system information in a portable way by using\nPython.\n\"\"\"\n\nfrom __future__ import division\n\n__version__ = \"0.5.0\"\nversion_info = tuple([int(num) for num in __version__.split('.')])\n\n__all__ = [\n # exceptions\n \"Error\", \"NoSuchProcess\", \"AccessDenied\", \"TimeoutExpired\",\n # constants\n \"NUM_CPUS\", \"TOTAL_PHYMEM\", \"BOOT_TIME\",\n \"version_info\", \"__version__\",\n \"STATUS_RUNNING\", \"STATUS_IDLE\", \"STATUS_SLEEPING\", \"STATUS_DISK_SLEEP\",\n \"STATUS_STOPPED\", \"STATUS_TRACING_STOP\", \"STATUS_ZOMBIE\", \"STATUS_DEAD\",\n \"STATUS_WAKING\", \"STATUS_LOCKED\",\n # classes\n \"Process\", \"Popen\",\n # functions\n \"test\", \"pid_exists\", \"get_pid_list\", \"process_iter\", \"get_process_list\",\n \"phymem_usage\", \"virtmem_usage\"\n \"cpu_times\", \"cpu_percent\", \"per_cpu_percent\",\n \"network_io_counters\", \"disk_io_counters\",\n ]\n\nimport sys\nimport os\nimport time\nimport signal\nimport warnings\nimport errno\nimport subprocess\ntry:\n import pwd\nexcept ImportError:\n pwd = None\n\nfrom psutil.error import Error, NoSuchProcess, AccessDenied, TimeoutExpired\nfrom psutil._compat import property, defaultdict\nfrom psutil._common import cached_property\nfrom psutil._common import (nt_disk_iostat as _nt_disk_iostat,\n nt_net_iostat as _nt_net_iostat)\nfrom psutil._common import (STATUS_RUNNING, STATUS_IDLE, STATUS_SLEEPING,\n STATUS_DISK_SLEEP, STATUS_STOPPED,\n STATUS_TRACING_STOP, STATUS_ZOMBIE, STATUS_DEAD,\n STATUS_WAKING, STATUS_LOCKED)\n\n# import the appropriate module for our platform only\nif sys.platform.startswith(\"linux\"):\n import psutil._pslinux as _psplatform\n from psutil._pslinux import (phymem_buffers,\n cached_phymem,\n IOPRIO_CLASS_NONE,\n IOPRIO_CLASS_RT,\n IOPRIO_CLASS_BE,\n IOPRIO_CLASS_IDLE)\n phymem_buffers = _psplatform.phymem_buffers\n cached_phymem = _psplatform.cached_phymem\n\nelif sys.platform.startswith(\"win32\"):\n import psutil._psmswindows as _psplatform\n from psutil._psmswindows import (ABOVE_NORMAL_PRIORITY_CLASS,\n BELOW_NORMAL_PRIORITY_CLASS,\n HIGH_PRIORITY_CLASS,\n IDLE_PRIORITY_CLASS,\n NORMAL_PRIORITY_CLASS,\n REALTIME_PRIORITY_CLASS)\n\nelif sys.platform.startswith(\"darwin\"):\n import psutil._psosx as _psplatform\n\nelif sys.platform.startswith(\"freebsd\"):\n import psutil._psbsd as _psplatform\n\nelse:\n raise NotImplementedError('platform %s is not supported' % sys.platform)\n\n__all__.extend(_psplatform.__extra__all__)\n\nNUM_CPUS = _psplatform.NUM_CPUS\nBOOT_TIME = _psplatform.BOOT_TIME\nTOTAL_PHYMEM = _psplatform.phymem_usage()[0]\n\n\nclass Process(object):\n \"\"\"Represents an OS process.\"\"\"\n\n def __init__(self, pid):\n \"\"\"Create a new Process object for the given pid.\n Raises NoSuchProcess if pid does not exist.\n \"\"\"\n self._pid = pid\n self._gone = False\n # platform-specific modules define an _psplatform.Process\n # implementation class\n self._platform_impl = _psplatform.Process(pid)\n self._last_sys_cpu_times = None\n self._last_proc_cpu_times = None\n # cache creation time for later use in is_running() method\n try:\n self.create_time\n except AccessDenied:\n pass\n except NoSuchProcess:\n raise NoSuchProcess(pid, None, 'no process found with pid %s' % pid)\n\n def __str__(self):\n try:\n pid = self.pid\n name = repr(self.name)\n except NoSuchProcess:\n 
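# the process ended between the pid read and the name lookup; mark it terminated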
details = \"(pid=%s (terminated))\" % self.pid\n except AccessDenied:\n details = \"(pid=%s)\" % (self.pid)\n else:\n details = \"(pid=%s, name=%s)\" % (pid, name)\n return \"%s.%s%s\" % (self.__class__.__module__,\n self.__class__.__name__, details)\n\n def __repr__(self):\n return \"<%s at %s>\" % (self.__str__(), id(self))\n\n def as_dict(self, attrs=[], ad_value=None):\n \"\"\"Utility method returning process information as a hashable\n dictionary.\n\n If 'attrs' is specified it must be a list of strings reflecting\n available Process class's attribute names (e.g. ['get_cpu_times',\n 'name']) else all public (read only) attributes are assumed.\n\n 'ad_value' is the value which gets assigned to a dict key in case\n AccessDenied exception is raised when retrieving that particular\n process information.\n \"\"\"\n excluded_names = set(['send_signal', 'suspend', 'resume', 'terminate',\n 'kill', 'wait', 'is_running', 'as_dict', 'parent',\n 'get_children', 'nice'])\n retdict = dict()\n for name in set(attrs or dir(self)):\n if name.startswith('_'):\n continue\n if name.startswith('set_'):\n continue\n if name in excluded_names:\n continue\n try:\n attr = getattr(self, name)\n if callable(attr):\n if name == 'get_cpu_percent':\n ret = attr(interval=0)\n else:\n ret = attr()\n else:\n ret = attr\n except AccessDenied:\n ret = ad_value\n except NotImplementedError:\n # in case of not implemented functionality (may happen\n # on old or exotic systems) we want to crash only if\n # the user explicitly asked for that particular attr\n if attrs:\n raise\n continue\n if name.startswith('get'):\n if name[3] == '_':\n name = name[4:]\n elif name == 'getcwd':\n name = 'cwd'\n retdict[name] = ret\n return retdict\n\n @property\n def pid(self):\n \"\"\"The process pid.\"\"\"\n return self._pid\n\n @cached_property\n def ppid(self):\n \"\"\"The process parent pid.\"\"\"\n return self._platform_impl.get_process_ppid()\n\n @property\n def parent(self):\n \"\"\"Return the parent process as a Process object. If no parent\n pid is known return None.\n \"\"\"\n ppid = self.ppid\n if ppid is not None:\n try:\n return Process(ppid)\n except NoSuchProcess:\n pass\n\n @cached_property\n def name(self):\n \"\"\"The process name.\"\"\"\n name = self._platform_impl.get_process_name()\n if os.name == 'posix':\n # On UNIX the name gets truncated to the first 15 characters.\n # If it matches the first part of the cmdline we return that\n # one instead because it's usually more explicative.\n # Examples are \"gnome-keyring-d\" vs. 
\"gnome-keyring-daemon\".\n cmdline = self.cmdline\n if cmdline:\n extended_name = os.path.basename(cmdline[0])\n if extended_name.startswith(name):\n name = extended_name\n # XXX - perhaps needs refactoring\n self._platform_impl._process_name = name\n return name\n\n @cached_property\n def exe(self):\n \"\"\"The process executable as an absolute path name.\"\"\"\n exe = self._platform_impl.get_process_exe()\n # if we have the cmdline but not the exe, figure it out from argv[0]\n if not exe:\n cmdline = self.cmdline\n if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):\n _exe = os.path.realpath(cmdline[0])\n if os.path.isfile(_exe) and os.access(_exe, os.X_OK):\n return _exe\n if not exe:\n raise AccessDenied(self.pid, self._platform_impl._process_name)\n return exe\n\n @cached_property\n def cmdline(self):\n \"\"\"The command line process has been called with.\"\"\"\n return self._platform_impl.get_process_cmdline()\n\n @property\n def status(self):\n \"\"\"The process current status as a STATUS_* constant.\"\"\"\n return self._platform_impl.get_process_status()\n\n if os.name == 'posix':\n\n @property\n def uids(self):\n \"\"\"Return a named tuple denoting the process real,\n effective, and saved user ids.\n \"\"\"\n return self._platform_impl.get_process_uids()\n\n @property\n def gids(self):\n \"\"\"Return a named tuple denoting the process real,\n effective, and saved group ids.\n \"\"\"\n return self._platform_impl.get_process_gids()\n\n @property\n def terminal(self):\n \"\"\"The terminal associated with this process, if any,\n else None.\n \"\"\"\n return self._platform_impl.get_process_terminal()\n\n @property\n def username(self):\n \"\"\"The name of the user that owns the process.\n On UNIX this is calculated by using *real* process uid.\n \"\"\"\n if os.name == 'posix':\n if pwd is None:\n # might happen if python was installed from sources\n raise ImportError(\"requires pwd module shipped with standard python\")\n return pwd.getpwuid(self.uids.real).pw_name\n else:\n return self._platform_impl.get_process_username()\n\n @cached_property\n def create_time(self):\n \"\"\"The process creation time as a floating point number\n expressed in seconds since the epoch, in UTC.\n \"\"\"\n return self._platform_impl.get_process_create_time()\n\n # available for Windows and Linux only\n if hasattr(_psplatform.Process, \"get_process_cwd\"):\n\n def getcwd(self):\n \"\"\"Return a string representing the process current working\n directory.\n \"\"\"\n return self._platform_impl.get_process_cwd()\n\n # Linux, BSD and Windows only\n if hasattr(_psplatform.Process, \"get_process_io_counters\"):\n\n def get_io_counters(self):\n \"\"\"Return process I/O statistics as a namedtuple including\n the number of read/write calls performed and the amount of\n bytes read and written by the process.\n \"\"\"\n return self._platform_impl.get_process_io_counters()\n\n def get_nice(self):\n \"\"\"Get process niceness (priority).\"\"\"\n return self._platform_impl.get_process_nice()\n\n def set_nice(self, value):\n \"\"\"Set process niceness (priority).\"\"\"\n return self._platform_impl.set_process_nice(value)\n\n # available only on Linux\n if hasattr(_psplatform.Process, \"get_process_ionice\"):\n\n def get_ionice(self):\n \"\"\"Return process I/O niceness (priority) as a namedtuple.\"\"\"\n return self._platform_impl.get_process_ionice()\n\n def set_ionice(self, ioclass, value=None):\n \"\"\"Set process I/O niceness (priority).\n ioclass is one of the IOPRIO_CLASS_* constants.\n iodata is a 
number which goes from 0 to 7. The higher the\n value, the lower the I/O priority of the process.\n \"\"\"\n return self._platform_impl.set_process_ionice(ioclass, value)\n\n # available on Windows and Linux only\n if hasattr(_psplatform.Process, \"get_process_cpu_affinity\"):\n\n def get_cpu_affinity(self):\n \"\"\"Get process current CPU affinity.\"\"\"\n return self._platform_impl.get_process_cpu_affinity()\n\n def set_cpu_affinity(self, cpus):\n \"\"\"Set process current CPU affinity.\n 'cpus' is a list of CPUs for which you want to set the\n affinity (e.g. [0, 1]).\n \"\"\"\n return self._platform_impl.set_process_cpu_affinity(cpus)\n\n if os.name == 'nt':\n\n def get_num_handles(self):\n \"\"\"Return the number of handles opened by this process\n (Windows only).\n \"\"\"\n return self._platform_impl.get_num_handles()\n\n if os.name == 'posix':\n\n def get_num_fds(self):\n \"\"\"Return the number of file descriptors opened by this\n process (POSIX only).\n \"\"\"\n return self._platform_impl.get_num_fds()\n\n def get_num_threads(self):\n \"\"\"Return the number of threads used by this process.\"\"\"\n return self._platform_impl.get_process_num_threads()\n\n def get_threads(self):\n \"\"\"Return threads opened by process as a list of namedtuples\n including thread id and thread CPU times (user/system).\n \"\"\"\n return self._platform_impl.get_process_threads()\n\n def get_children(self, recursive=False):\n \"\"\"Return the children of this process as a list of Process\n objects.\n If recursive is True return all the parent descendants.\n\n Example (A == this process):\n\n A ─┐\n │\n ├─ B (child) ─┐\n │ └─ X (grandchild) ─┐\n │ └─ Y (great grandchild)\n ├─ C (child)\n └─ D (child)\n\n >>> p.get_children()\n B, C, D\n >>> p.get_children(recursive=True)\n B, X, Y, C, D\n\n Note that in the example above if process X disappears\n process Y won't be returned either as the reference to\n process A is lost.\n \"\"\"\n if not self.is_running():\n name = self._platform_impl._process_name\n raise NoSuchProcess(self.pid, name)\n\n ret = []\n if not recursive:\n for p in process_iter():\n try:\n if p.ppid == self.pid:\n ret.append(p)\n except NoSuchProcess:\n pass\n else:\n # construct a dict where 'values' are all the processes\n # having 'key' as their parent\n table = defaultdict(list)\n for p in process_iter():\n try:\n table[p.ppid].append(p)\n except NoSuchProcess:\n pass\n # At this point we have a mapping table where table[self.pid]\n # are the current process's children.\n # Below, we look for all descendants recursively, similarly\n # to a recursive function call.\n checkpids = [self.pid]\n for pid in checkpids:\n for proc in table[pid]:\n ret.append(proc)\n if proc.pid not in checkpids:\n checkpids.append(proc.pid)\n return ret\n\n def get_cpu_percent(self, interval=0.1):\n \"\"\"Return a float representing the current process CPU\n utilization as a percentage.\n\n When interval is > 0.0 compares process times to system CPU\n times elapsed before and after the interval (blocking).\n\n When interval is 0.0 or None compares process times to system CPU\n times elapsed since last call, returning immediately.\n In this case is recommended for accuracy that this function be\n called with at least 0.1 seconds between calls.\n \"\"\"\n blocking = interval is not None and interval > 0.0\n if blocking:\n st1 = sum(cpu_times())\n pt1 = self._platform_impl.get_cpu_times()\n time.sleep(interval)\n st2 = sum(cpu_times())\n pt2 = self._platform_impl.get_cpu_times()\n else:\n st1 = 
self._last_sys_cpu_times\n pt1 = self._last_proc_cpu_times\n st2 = sum(cpu_times())\n pt2 = self._platform_impl.get_cpu_times()\n if st1 is None or pt1 is None:\n self._last_sys_cpu_times = st2\n self._last_proc_cpu_times = pt2\n return 0.0\n\n delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)\n delta_time = st2 - st1\n # reset values for next call in case of interval == None\n self._last_sys_cpu_times = st2\n self._last_proc_cpu_times = pt2\n\n try:\n # the utilization split between all CPUs\n overall_percent = (delta_proc / delta_time) * 100\n except ZeroDivisionError:\n # interval was too low\n return 0.0\n # the utilization of a single CPU\n single_cpu_percent = overall_percent * NUM_CPUS\n # on posix a percentage > 100 is legitimate\n # http://stackoverflow.com/questions/1032357/comprehending-top-cpu-usage\n # on windows we use this ugly hack to avoid troubles with float\n # precision issues\n if os.name != 'posix':\n if single_cpu_percent > 100.0:\n return 100.0\n return round(single_cpu_percent, 1)\n\n def get_cpu_times(self):\n \"\"\"Return a tuple whose values are process CPU user and system\n times. The same as os.times() but per-process.\n \"\"\"\n return self._platform_impl.get_cpu_times()\n\n def get_memory_info(self):\n \"\"\"Return a tuple representing RSS (Resident Set Size) and VMS\n (Virtual Memory Size) in bytes.\n\n On UNIX RSS and VMS are the same values shown by ps.\n\n On Windows RSS and VMS refer to \"Mem Usage\" and \"VM Size\" columns\n of taskmgr.exe.\n \"\"\"\n return self._platform_impl.get_memory_info()\n\n def get_memory_percent(self):\n \"\"\"Compare physical system memory to process resident memory and\n calculate process memory utilization as a percentage.\n \"\"\"\n rss = self._platform_impl.get_memory_info()[0]\n try:\n return (rss / float(TOTAL_PHYMEM)) * 100\n except ZeroDivisionError:\n return 0.0\n\n def get_memory_maps(self, grouped=True):\n \"\"\"Return process's mapped memory regions as a list of nameduples\n whose fields are variable depending on the platform.\n\n If 'grouped' is True the mapped regions with the same 'path'\n are grouped together and the different memory fields are summed.\n\n If 'grouped' is False every mapped region is shown as a single\n entity and the namedtuple will also include the mapped region's\n address space ('addr') and permission set ('perms').\n \"\"\"\n it = self._platform_impl.get_memory_maps()\n if grouped:\n d = {}\n for tupl in it:\n path = tupl[2]\n nums = tupl[3:]\n try:\n d[path] = map(lambda x, y: x+y, d[path], nums)\n except KeyError:\n d[path] = nums\n nt = self._platform_impl.nt_mmap_grouped\n return [nt(path, *d[path]) for path in d]\n else:\n nt = self._platform_impl.nt_mmap_ext\n return [nt(*x) for x in it]\n\n def get_open_files(self):\n \"\"\"Return files opened by process as a list of namedtuples\n including absolute file name and file descriptor number.\n \"\"\"\n return self._platform_impl.get_open_files()\n\n def get_connections(self, kind='inet'):\n \"\"\"Return connections opened by process as a list of namedtuples.\n The kind parameter filters for connections that fit the following\n criteria:\n\n Kind Value Connections using\n inet IPv4 and IPv6\n inet4 IPv4\n inet6 IPv6\n tcp TCP\n tcp4 TCP over IPv4\n tcp6 TCP over IPv6\n udp UDP\n udp4 UDP over IPv4\n udp6 UDP over IPv6\n all the sum of all the possible families and protocols\n \"\"\"\n return self._platform_impl.get_connections(kind)\n\n def is_running(self):\n \"\"\"Return whether this process is running.\"\"\"\n if 
self._gone:\n return False\n try:\n # Checking if pid is alive is not enough as the pid might\n # have been reused by another process.\n # pid + creation time, on the other hand, is supposed to\n # identify a process univocally.\n return self.create_time == \\\n self._platform_impl.get_process_create_time()\n except NoSuchProcess:\n self._gone = True\n return False\n\n def send_signal(self, sig):\n \"\"\"Send a signal to process (see signal module constants).\n On Windows only SIGTERM is valid and is treated as an alias\n for kill().\n \"\"\"\n # safety measure in case the current process has been killed in\n # meantime and the kernel reused its PID\n if not self.is_running():\n name = self._platform_impl._process_name\n raise NoSuchProcess(self.pid, name)\n if os.name == 'posix':\n try:\n os.kill(self.pid, sig)\n except OSError:\n err = sys.exc_info()[1]\n name = self._platform_impl._process_name\n if err.errno == errno.ESRCH:\n raise NoSuchProcess(self.pid, name)\n if err.errno == errno.EPERM:\n raise AccessDenied(self.pid, name)\n raise\n else:\n if sig == signal.SIGTERM:\n self._platform_impl.kill_process()\n else:\n raise ValueError(\"only SIGTERM is supported on Windows\")\n\n def suspend(self):\n \"\"\"Suspend process execution.\"\"\"\n # safety measure in case the current process has been killed in\n # meantime and the kernel reused its PID\n if not self.is_running():\n name = self._platform_impl._process_name\n raise NoSuchProcess(self.pid, name)\n # windows\n if hasattr(self._platform_impl, \"suspend_process\"):\n self._platform_impl.suspend_process()\n else:\n # posix\n self.send_signal(signal.SIGSTOP)\n\n def resume(self):\n \"\"\"Resume process execution.\"\"\"\n # safety measure in case the current process has been killed in\n # meantime and the kernel reused its PID\n if not self.is_running():\n name = self._platform_impl._process_name\n raise NoSuchProcess(self.pid, name)\n # windows\n if hasattr(self._platform_impl, \"resume_process\"):\n self._platform_impl.resume_process()\n else:\n # posix\n self.send_signal(signal.SIGCONT)\n\n def terminate(self):\n \"\"\"Terminate the process with SIGTERM.\n On Windows this is an alias for kill().\n \"\"\"\n self.send_signal(signal.SIGTERM)\n\n def kill(self):\n \"\"\"Kill the current process.\"\"\"\n # safety measure in case the current process has been killed in\n # meantime and the kernel reused its PID\n if not self.is_running():\n name = self._platform_impl._process_name\n raise NoSuchProcess(self.pid, name)\n if os.name == 'posix':\n self.send_signal(signal.SIGKILL)\n else:\n self._platform_impl.kill_process()\n\n def wait(self, timeout=None):\n \"\"\"Wait for process to terminate and, if process is a children\n of the current one also return its exit code, else None.\n \"\"\"\n if timeout is not None and not timeout >= 0:\n raise ValueError(\"timeout must be a positive integer\")\n return self._platform_impl.process_wait(timeout)\n\n # --- deprecated API\n\n @property\n def nice(self):\n \"\"\"Get or set process niceness (priority).\n Deprecated, use get_nice() instead.\n \"\"\"\n msg = \"this property is deprecated; use Process.get_nice() method instead\"\n warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n return self.get_nice()\n\n @nice.setter\n def nice(self, value):\n # invoked on \"p.nice = num\"; change process niceness\n # deprecated in favor of set_nice()\n msg = \"this property is deprecated; use Process.set_nice() method instead\"\n warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n return 
self.set_nice(value)\n\n\nclass Popen(Process):\n \"\"\"A more convenient interface to stdlib subprocess module.\n It starts a sub process and deals with it exactly as when using\n subprocess.Popen class but in addition also provides all the\n property and methods of psutil.Process class in a single interface:\n\n >>> import psutil\n >>> from subprocess import PIPE\n >>> p = psutil.Popen([\"/usr/bin/python\", \"-c\", \"print 'hi'\"], stdout=PIPE)\n >>> p.name\n 'python'\n >>> p.uids\n user(real=1000, effective=1000, saved=1000)\n >>> p.username\n 'giampaolo'\n >>> p.communicate()\n ('hi\\n', None)\n >>> p.terminate()\n >>> p.wait(timeout=2)\n 0\n >>>\n\n For method names common to both classes such as kill(), terminate()\n and wait(), psutil.Process implementation takes precedence.\n\n For a complete documentation refers to:\n http://docs.python.org/library/subprocess.html\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.__subproc = subprocess.Popen(*args, **kwargs)\n self._pid = self.__subproc.pid\n self._gone = False\n self._platform_impl = _psplatform.Process(self._pid)\n self._last_sys_cpu_times = None\n self._last_proc_cpu_times = None\n try:\n self.create_time\n except AccessDenied:\n pass\n except NoSuchProcess:\n raise NoSuchProcess(self._pid, None,\n \"no process found with pid %s\" % pid)\n\n def __dir__(self):\n return list(set(dir(Popen) + dir(subprocess.Popen)))\n\n def __getattribute__(self, name):\n try:\n return object.__getattribute__(self, name)\n except AttributeError:\n try:\n return object.__getattribute__(self.__subproc, name)\n except AttributeError:\n raise AttributeError(\"%s instance has no attribute '%s'\"\n %(self.__class__.__name__, name))\n\n\nget_pid_list = _psplatform.get_pid_list\npid_exists = _psplatform.pid_exists\n\n_pmap = {}\n\ndef process_iter():\n \"\"\"Return a generator yielding a Process class instance for all\n running processes on the local machine.\n\n Every new Process instance is only created once and then cached\n into an internal table which is updated every time this is used.\n \"\"\"\n def add(pid):\n proc = Process(pid)\n _pmap[proc.pid] = proc\n return proc\n\n def remove(pid):\n _pmap.pop(pid, None)\n\n a = set(get_pid_list())\n b = set(_pmap.keys())\n new_pids = a - b\n gone_pids = b - a\n\n for pid in gone_pids:\n remove(pid)\n for pid, proc in list(_pmap.items()):\n try:\n # use is_running() to check whether PID has been reused by\n # another process in which case yield a new Process instance\n if proc.is_running():\n yield proc\n else:\n yield add(pid)\n except NoSuchProcess:\n remove(pid)\n except AccessDenied:\n # Process creation time can't be determined hence there's\n # no way to tell whether the pid of the cached process\n # has been reused. 
Just return the cached version.\n yield proc\n for pid in new_pids:\n try:\n yield add(pid)\n except NoSuchProcess:\n pass\n\ndef cpu_times(percpu=False):\n \"\"\"Return system-wide CPU times as a namedtuple object.\n Every CPU time represents the time CPU has spent in the given mode.\n The attributes availability varies depending on the platform.\n Here follows a list of all available attributes:\n - user\n - system\n - idle\n - nice (UNIX)\n - iowait (Linux)\n - irq (Linux, FreeBSD)\n - softirq (Linux)\n\n When percpu is True return a list of nameduples for each CPU.\n First element of the list refers to first CPU, second element\n to second CPU and so on.\n The order of the list is consistent across calls.\n \"\"\"\n if not percpu:\n return _psplatform.get_system_cpu_times()\n else:\n return _psplatform.get_system_per_cpu_times()\n\n\n_last_cpu_times = cpu_times()\n_last_per_cpu_times = cpu_times(percpu=True)\n\ndef cpu_percent(interval=0.1, percpu=False):\n \"\"\"Return a float representing the current system-wide CPU\n utilization as a percentage.\n\n When interval is > 0.0 compares system CPU times elapsed before\n and after the interval (blocking).\n\n When interval is 0.0 or None compares system CPU times elapsed\n since last call or module import, returning immediately.\n In this case is recommended for accuracy that this function be\n called with at least 0.1 seconds between calls.\n\n When percpu is True returns a list of floats representing the\n utilization as a percentage for each CPU.\n First element of the list refers to first CPU, second element\n to second CPU and so on.\n The order of the list is consistent across calls.\n \"\"\"\n global _last_cpu_times\n global _last_per_cpu_times\n blocking = interval is not None and interval > 0.0\n\n def calculate(t1, t2):\n t1_all = sum(t1)\n t1_busy = t1_all - t1.idle\n\n t2_all = sum(t2)\n t2_busy = t2_all - t2.idle\n\n # this usually indicates a float precision issue\n if t2_busy <= t1_busy:\n return 0.0\n\n busy_delta = t2_busy - t1_busy\n all_delta = t2_all - t1_all\n busy_perc = (busy_delta / all_delta) * 100\n return round(busy_perc, 1)\n\n # system-wide usage\n if not percpu:\n if blocking:\n t1 = cpu_times()\n time.sleep(interval)\n else:\n t1 = _last_cpu_times\n _last_cpu_times = cpu_times()\n return calculate(t1, _last_cpu_times)\n # per-cpu usage\n else:\n ret = []\n if blocking:\n tot1 = cpu_times(percpu=True)\n time.sleep(interval)\n else:\n tot1 = _last_per_cpu_times\n _last_per_cpu_times = cpu_times(percpu=True)\n for t1, t2 in zip(tot1, _last_per_cpu_times):\n ret.append(calculate(t1, t2))\n return ret\n\ndef phymem_usage():\n \"\"\"Return the amount of total, used and free physical memory\n on the system in bytes plus the percentage usage.\n \"\"\"\n return _psplatform.phymem_usage()\n\ndef virtmem_usage():\n \"\"\"Return the amount of total, used and free virtual memory\n on the system in bytes plus the percentage usage.\n\n On Linux they match the values returned by free command line utility.\n On OS X and FreeBSD they represent the same values as returned by\n sysctl vm.vmtotal. 
On Windows they are determined by reading the\n PageFile values of MEMORYSTATUSEX structure.\n \"\"\"\n return _psplatform.virtmem_usage()\n\ndef disk_usage(path):\n \"\"\"Return disk usage statistics about the given path as a namedtuple\n including total, used and free space expressed in bytes plus the\n percentage usage.\n \"\"\"\n return _psplatform.get_disk_usage(path)\n\ndef disk_partitions(all=False):\n \"\"\"Return mounted partitions as a list of namedtuples including\n device, mount point, filesystem type and mount options (a raw\n string separated by commas which may vary depending on the platform).\n\n If \"all\" parameter is False return physical devices only and ignore\n all others.\n \"\"\"\n return _psplatform.disk_partitions(all)\n\ndef network_io_counters(pernic=False):\n \"\"\"Return network I/O statistics as a namedtuple including\n the following attributes:\n\n - bytes_sent: number of bytes sent\n - bytes_recv: number of bytes received\n - packets_sent: number of packets sent\n - packets_recv: number of packets received\n\n If pernic is True return the same information for every\n network interface installed on the system as a dictionary\n with network interface names as the keys and the namedtuple\n described above as the values.\n \"\"\"\n rawdict = _psplatform.network_io_counters()\n if not rawdict:\n raise RuntimeError(\"couldn't find any network interface\")\n if pernic:\n for nic, fields in rawdict.items():\n rawdict[nic] = _nt_net_iostat(*fields)\n return rawdict\n else:\n return _nt_net_iostat(*[sum(x) for x in zip(*rawdict.values())])\n\ndef disk_io_counters(perdisk=False):\n \"\"\"Return system disk I/O statistics as a namedtuple including\n the following attributes:\n\n - read_count: number of reads\n - write_count: number of writes\n - read_bytes: number of bytes read\n - write_bytes: number of bytes written\n - read_time: time spent reading from disk (in milliseconds)\n - write_time: time spent writing to disk (in milliseconds)\n\n If perdisk is True return the same information for every\n physical disk installed on the system as a dictionary\n with partition names as the keys and the namedutuple\n described above as the values.\n \"\"\"\n rawdict = _psplatform.disk_io_counters()\n if not rawdict:\n raise RuntimeError(\"couldn't find any physical disk\")\n if perdisk:\n for disk, fields in rawdict.items():\n rawdict[disk] = _nt_disk_iostat(*fields)\n return rawdict\n else:\n return _nt_disk_iostat(*[sum(x) for x in zip(*rawdict.values())])\n\ndef get_users():\n \"\"\"Return users currently connected on the system as a list of\n namedtuples including the following attributes.\n\n - user: the name of the user\n - terminal: the tty or pseudo-tty associated with the user, if any.\n - host: the host name associated with the entry, if any.\n - started: the creation time as a floating point number expressed in\n seconds since the epoch.\n \"\"\"\n return _psplatform.get_system_users()\n\n# http://goo.gl/jYLvf\ndef _deprecated(replacement=None):\n \"\"\"A decorator which can be used to mark functions as deprecated.\"\"\"\n def outer(fun):\n def inner(*args, **kwargs):\n msg = \"psutil.%s is deprecated\" % fun.__name__\n if replacement is not None:\n msg += \"; use %s instead\" % replacement\n warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n return fun(*args, **kwargs)\n return inner\n return outer\n\n# --- deprecated functions\n\n@_deprecated()\ndef get_process_list():\n \"\"\"Return a list of Process class instances for all running\n processes on the 
local machine (deprecated).\n \"\"\"\n return list(process_iter())\n\n@_deprecated(\"psutil.phymem_usage\")\ndef avail_phymem():\n return phymem_usage().free\n\n@_deprecated(\"psutil.phymem_usage\")\ndef used_phymem():\n return phymem_usage().used\n\n@_deprecated(\"psutil.virtmem_usage\")\ndef total_virtmem():\n return virtmem_usage().total\n\n@_deprecated(\"psutil.virtmem_usage\")\ndef used_virtmem():\n return virtmem_usage().used\n\n@_deprecated(\"psutil.virtmem_usage\")\ndef avail_virtmem():\n return virtmem_usage().free\n\ndef test():\n \"\"\"List info of all currently running processes emulating ps aux\n output.\n \"\"\"\n import datetime\n from psutil._compat import print_\n\n today_day = datetime.date.today()\n templ = \"%-10s %5s %4s %4s %7s %7s %-13s %5s %7s %s\"\n attrs = ['pid', 'username', 'get_cpu_percent', 'get_memory_percent', 'name',\n 'get_cpu_times', 'create_time', 'get_memory_info']\n if os.name == 'posix':\n attrs.append('terminal')\n print_(templ % (\"USER\", \"PID\", \"%CPU\", \"%MEM\", \"VSZ\", \"RSS\", \"TTY\", \"START\",\n \"TIME\", \"COMMAND\"))\n for p in sorted(process_iter(), key=lambda p: p.pid):\n try:\n pinfo = p.as_dict(attrs, ad_value='')\n except NoSuchProcess:\n pass\n else:\n if pinfo['create_time']:\n ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])\n if ctime.date() == today_day:\n ctime = ctime.strftime(\"%H:%M\")\n else:\n ctime = ctime.strftime(\"%b%d\")\n cputime = time.strftime(\"%M:%S\", time.localtime(sum(pinfo['cpu_times'])))\n user = pinfo['username']\n if os.name == 'nt' and '\\\\' in user:\n user = user.split('\\\\')[1]\n vms = pinfo['memory_info'] and \\\n int(pinfo['memory_info'].vms / 1024) or '?'\n rss = pinfo['memory_info'] and \\\n int(pinfo['memory_info'].rss / 1024) or '?'\n memp = pinfo['memory_percent'] and \\\n round(pinfo['memory_percent'], 1) or '?'\n print_(templ % (user[:10],\n pinfo['pid'],\n pinfo['cpu_percent'],\n memp,\n vms,\n rss,\n pinfo.get('terminal', '') or '?',\n ctime,\n cputime,\n pinfo['name'].strip() or '?'))\n\nif __name__ == \"__main__\":\n test()\n\ndel property, cached_property, division\nif sys.version_info < (3, 0):\n del num\n","sub_path":"MBPyFunctionServer/psutil/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":37776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"106240234","text":"import base64\nimport string\nfrom base64 import b64encode\nfrom Crypto.Cipher import AES\nimport os\nimport sys\n\n\ndef pad(text):\n while len(text) % 16 != 0:\n text += '\\0'\n return str.encode(text)\n\n\ndef rsa_encrypt(rsa_key, aes_key):\n rsa_key = rsa_key.strip(string.punctuation)\n rsa_key = rsa_key.split(\",\")\n\n e = int(rsa_key[0])\n n = int(rsa_key[1])\n\n aes_key = str(aes_key)\n # print(type(aes_key))\n # print(\"encrypted key: \", aes_key)\n\n # Convert each letter in the plaintext to numbers based on the character using a^b mod m\n cipher = [(ord(char) ** e) % n for char in aes_key]\n # Return the array of bytes\n # print(\"encrypted_aes_key type:\", type(cipher))\n return cipher\n\n\ndef aes_encrypt(rsa_key, in_filename, output_file_name):\n aes_key = os.urandom(16)\n aes = AES.new(aes_key, AES.MODE_CBC)\n\n # print(\"aes key: \", aes_key)\n # print(\"aes key length:\", len(aes_key))\n # print(\"aes key type: \", type(aes_key))\n\n # get message\n text1_file = open(in_filename, \"r\")\n # it's a string\n lines = text1_file.read()\n\n encrypted_text = aes.encrypt(pad(lines))\n # print(\"encrypted text_type: \", 
type(encrypted_text))\n # print(\"encrypted text\", encrypted_text)\n # print(\"encrypted text length: \", len(encrypted_text))\n\n decode_encrypted_text = b64encode(encrypted_text).decode('utf-8')\n # print(\"this is decode encrypt text: \", decode_encrypted_text)\n # print(len(decode_encrypted_text))\n\n # before using rsa to encrypt, decode the bytes first\n decode_aes_key = b64encode(aes_key).decode('utf-8')\n # print(\"decode aes key: \", decode_aes_key)\n # print(\"decoded aes key type: \", type(decode_aes_key))\n\n # encrypting the aes key\n encrypted_key = rsa_encrypt(rsa_key, decode_aes_key)\n\n entire_message = encrypted_key, 'packet', decode_encrypted_text\n\n filename = \"%s\" % output_file_name\n FILE = open(filename, \"w\")\n entire_message = str(entire_message)\n FILE.writelines(entire_message)\n FILE.close()\n\n print(\"Finish Encryption\")\n\n # print(encrypted_text)\n # print(entire_message)\n\n\nif '-e' in sys.argv:\n flag_index = sys.argv.index('-e')\n\n flag_index += 1\n rsa_key_filename = (sys.argv[flag_index])\n # get message\n rsa_key_file = open(rsa_key_filename, \"r\")\n # it's a string\n rsa_key_lines = rsa_key_file.read()\n\n flag_index += 1\n plaintext_filename = (sys.argv[flag_index])\n flag_index += 1\n output_filename = (sys.argv[flag_index])\n\n aes_encrypt(rsa_key_lines, plaintext_filename, output_filename)\n\n\ndef rsa_decrypt(rsa_key, encrypted_key):\n rsa_key = rsa_key.strip(string.punctuation)\n rsa_key = rsa_key.split(\",\")\n\n d = int(rsa_key[0])\n n = int(rsa_key[1])\n\n # print(type(encrypted_key))\n encrypted_key = encrypted_key.strip(string.punctuation)\n encrypted_key = encrypted_key.replace(\" \", \"\")\n encrypted_key = encrypted_key.split(',')\n\n encrypted_key = list(map(int, encrypted_key))\n\n plain = [chr((char ** d) % n) for char in encrypted_key]\n\n aes_key = ''.join(plain)\n\n encode_aes_key = base64.b64decode(aes_key.encode('utf-8'))\n\n # print(\"rsa decrypt result(aes key): \", encode_aes_key)\n # print(len(encode_aes_key))\n # print(type(encode_aes_key))\n\n # Return the array of bytes as a string\n\n return encode_aes_key\n\n\ndef aes_decrypt(rsa_key, cipher_file, output_filename):\n # get message\n cipher_file = open(cipher_file, \"r\")\n # it's a string\n cipher_lines = cipher_file.read()\n cipher = cipher_lines.split(\", 'packet', \")\n\n encrypted_key = cipher[0]\n\n encrypted_key = encrypted_key.split('(')\n encrypted_key = encrypted_key[1]\n\n # print(\"encrypted key: \", encrypted_key)\n # print(\"encrypted key type: \", type(encrypted_key))\n\n aes_key = rsa_decrypt(rsa_key, encrypted_key)\n\n ct_content = cipher[1]\n # print(\"this is ct_content: \", ct_content)\n ct_content = ct_content[1:-2]\n # print(\"this is ct_content: \", ct_content)\n encode_ct_content = base64.b64decode(ct_content.encode('utf-8'))\n # print(encode_ct_content)\n # print(len(encode_ct_content))\n # print(type(encode_ct_content))\n\n aes = AES.new(aes_key, AES.MODE_CBC)\n decrypted_text = aes.decrypt(encode_ct_content).decode('latin-1')\n decrypted_text = decrypted_text[16:]\n # print(\"decrypted text:\", decrypted_text)\n # print(type(decrypted_text))\n\n filename = \"%s\" % output_filename\n FILE = open(filename, \"w\")\n FILE.writelines(decrypted_text)\n FILE.close()\n\n print(\"Finished decrypting!\")\n\n\nif '-d' in sys.argv:\n flag_index = sys.argv.index('-d')\n\n flag_index += 1\n rsa_key_filename = (sys.argv[flag_index])\n # get message\n rsa_key_file = open(rsa_key_filename, \"r\")\n # it's a string\n rsa_key_lines = 
rsa_key_file.read()\n\n flag_index += 1\n cipher_filename = (sys.argv[flag_index])\n\n flag_index += 1\n output_filename = (sys.argv[flag_index])\n\n aes_decrypt(rsa_key_lines, cipher_filename, output_filename)\n\n\n","sub_path":"crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"523693037","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 1 09:33:07 2018\n\n@author: Alu\n\"\"\"\n\nDay_before=100\nSize_test = 50 \n \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import metrics\n\ndataset = pd.read_csv('data.csv')\ndataset_train = dataset[Size_test:]\ndataset_test = dataset[:Size_test]\n \n \n \n################################################# Preprocess Train ######################################\n \n \ntraining_set = dataset_train.iloc[:, 4:5].values\n \nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\ntraining_set_scaled = sc.fit_transform(training_set)\n \nX_train = []\ny_train = []\nfor i in range(len(training_set)-Day_before):\n X_train.append(training_set_scaled[i+1:i+Day_before +1, 0])\n y_train.append(training_set_scaled[i, 0])\nX_train, y_train = np.array(X_train), np.array(y_train)\n \n \n################################################### Fit Model #############################################\n \n \n \nfrom sklearn.ensemble import RandomForestRegressor\nregressor = RandomForestRegressor(n_estimators = 30)\nregressor.fit(X_train, y_train)\n \n#from sklearn import model_selection\n##model_selection.cross_val_score(regressor,X_train,y_train,cv=5,scoring='r2')\n#\n#\n#parameters = [{'n_estimators': [10,20,30,40], 'criterion': ['mae','mse'], 'max_features': [\"sqrt\", \"log2\", \"auto\"]}]\n# \n#\n#grid_search=model_selection.GridSearchCV(regressor,param_grid=parameters,scoring='r2',cv=10)\n#grid_search = grid_search.fit(X_train, y_train)\n#best_accuracy = grid_search.best_score_\n#best_parameters = grid_search.best_params_\n\n################################################ Preprocess Testset ########################################\n \n \nreal_stock_price = dataset_test.iloc[:, 4:5].values.reshape(1,-1)[0]\ndataset_total = pd.concat((dataset_test['close'] , dataset_train['close']), axis = 0)\ninputs = dataset_total[:len(dataset_test) + Day_before].values\ninputs = inputs.reshape(-1,1)\ninputs = sc.transform(inputs)\nX_test = []\nfor i in range(len(inputs)-Day_before):\n X_test.append(inputs[i+1:i+Day_before+1, 0])\nX_test = np.array(X_test)\n \n################################################### Predict #################################################\n \npredicted_stock_price = regressor.predict(X_test)\npredicted_stock_price = sc.inverse_transform(predicted_stock_price.reshape(-1,1)).reshape(1,-1)[0]\n \n \n \n############################################## Evalutate ##############################################\n \n \ndef MSE(y_pred,y_test):\n return (np.sqrt(np.mean((y_pred-y_test)**2)))\n \n \nprint(metrics.r2_score(real_stock_price, predicted_stock_price))\n\n############################################## Visualising ############################################\n\n\n\nplt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')\nplt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')\nplt.title('Google Stock Price Prediction')\nplt.xlabel('Time')\nplt.ylabel('Google Stock 
Price')\nplt.legend()\nplt.show()\n\n#money=1000\n#for i in range (Size_test-1):\n# curent_real = real_stock_price[Size_test-i-1]\n# pred = predicted_stock_price[Size_test-i-2]\n# next_real = real_stock_price[Size_test-i-2]\n# B_pred = (pred-curent_real)/curent_real\n# B_real = (next_real-curent_real)/curent_real\n# if B_pred*B_real >0 :\n# money = money*(1+abs(B_real))\n# else :\n# money = money*(1-abs(B_real))\n# print (money)\n","sub_path":"Regressor_simple/RF_Reg.py","file_name":"RF_Reg.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"378392163","text":"import pickle\nimport os\nfrom pprint import pprint\nimport pandas as pd\nimport simplejson\n\n# get current working directory\ndir = os.getcwd()\n\n# function to get the dictionary of countries and their repective country code\ndef get_country_and_code(country_only=False):\n # load csv into dataframe\n data = pd.read_csv(dir+'/src/lib/csv/co2_emission.csv')\n \n # country_only is true then return only list of unique countries\n if country_only:\n country = data.Entity.unique()\n return country\n # else create a dictionary of country and their repective code\n country_and_code = dict(zip(data.Entity, data.Code))\n return country_and_code\n\n\n# function to get prediction data for given year for given year range\ndef prediction_per_country(country_name, year_lower_range, year_upper_range):\n # initialise data list\n data_list = list()\n # get the country with code dict\n country_and_code = get_country_and_code()\n \n # loop for each year and get respective prediction data for that year\n for i in range(year_lower_range, year_upper_range+1):\n # read the model pickle file (load the model)\n model = pickle.load(open(dir+'/prediction/model_resource/{}.pkl'.format(country_name.lower()), 'rb'))\n \n # use loaded model to predict the given year\n co_data = model.predict([[i]])\n \n # fit this prediction data into the required dict format\n data = {\n 'Entity':country_name,\n 'Code':country_and_code[country_name],\n 'Year':str(i),\n 'Annual CO2 emissions (tonnes )':str(co_data[0]),\n }\n # append this dict to data list which is to be returned by the function\n data_list.append(data)\n return data_list\n\n\n# function to gather the overall prediction data for all countries\ndef overall_prediction(year_lower_range, year_upper_range):\n # get the list of all countries\n country_list = get_country_and_code()\n # initialize the list\n data_list = list()\n try:\n # iterate through each country in country_list\n for country in country_list:\n # get the prediction data for given country\n country_data = prediction_per_country(country, year_lower_range, year_upper_range)\n # add this to result list\n data_list += country_data\n # return the result list\n return data_list\n except Exception as e:\n print('Error in getting country prediction data and error is: ' + str(e))\n \n\n# function to save the prediction data of all countries into the file\ndef save_prediction_file(year_lower_range, year_upper_range, path=dir+'/src/lib/'):\n # call overall_prediction for required year range\n country_prediction_data = overall_prediction(year_lower_range, year_upper_range)\n pprint(country_prediction_data)\n \n # open the file to save the prediction data so that it will reflect in the web page\n file = open(path+'worldjsonWithPrediction.js', 'a')\n \n # write the data by converting it to json\n file.write(simplejson.dumps(country_prediction_data, ignore_nan=True))\n 
\n # close the file\n file.close()\n\n\n\n\nif __name__ == '__main__':\n \n # call the save_prediction_file for given year range\n save_prediction_file(2018, 2099)","sub_path":"prediction/model_prediction.py","file_name":"model_prediction.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"267636085","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('panel', '0036_auto_20160815_1855'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='fbouser',\n name='module_sms',\n field=models.IntegerField(default=0, choices=[(0, 'Brak dostępu'), (1, 'Tylko odczyt'), (2, 'Pełen dostęp'), (3, 'Dostęp NCR')], verbose_name='Moduł SMS'),\n ),\n ]\n","sub_path":"panel/migrations/0037_auto_20160830_1920.py","file_name":"0037_auto_20160830_1920.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"32003569","text":"import tkinter as tk\nimport sys\nfrom tcp_client import ChatClient\nchat_client = ChatClient()\n\nimport client_share\nclient_share.chat_client = chat_client\nclient_share.APP_TITLE = 'Student App'\n\nif __name__ == '__main__':\n login_app = client_share.LoginWindow()\n try:\n login_app.mainloop()\n except tk.TclError:\n pass\n except KeyboardInterrupt:\n login_app.quit()\n sys.exit()\n if not chat_client.online:\n print('Not in Login')\n sys.exit()\n\n app = client_share.MainApp()\n try:\n app.mainloop()\n except KeyboardInterrupt:\n app.quit()\n\n\n","sub_path":"client_student.py","file_name":"client_student.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"222550756","text":"from variations import variation_list, variable_list\nimport lfos\nfrom utils import polar, rect\nimport colorsys, itertools, operator\nimport xml.etree.cElementTree as ET\n\n\nclass Flames(object):\n def __init__(self, element=None, filename=None):\n self.flames = []\n if operator.xor(element, filename):\n raise ValueError('Need element or filename, not both or neither')\n if element:\n self.from_element(element)\n elif filename:\n self.from_file(filename)\n\n def from_element(self, element):\n if element.tag == 'flames':\n self.flames = [Flame(flame) for flame in element.findall('flame')]\n elif element.tag == 'flame':\n self.flames = [Flame(element)]\n else:\n #TODO: ParseError?\n raise ValueError('Needs to be in or ')\n\n def from_string(self, string):\n self.from_element(ET.fromstring(string))\n\n def from_file(self, filename):\n with open(filename, 'r') as f:\n self.from_string(f.read())\n\n def iter_flames(self):\n for flame in self.flames:\n yield flame\n\n def to_element(self):\n element = ET.Element('flames')\n element.extend([flame.to_element() for flame in self.iter_flames()])\n return element\n\n def to_string(self):\n return ET.tostring(self.to_element())\n\n def to_file(self, filename):\n with open(filename, 'w') as f:\n f.write(self.to_string())\n return True\n\n\nclass Flame(object):\n _never_write = (\n 'final',\n 'palette',\n 'xforms',\n 'name',\n 'scale',\n 'x_offset',\n 'y_offset',\n 'width',\n 'height',\n '_numx',\n )\n _defaults = {\n 'name': 'none',\n 'size': (720, 360),\n 'center': (0.0, 0.0),\n 'scale': 20,\n 'time': 0,\n 'rotate': 0.0,\n 'brightness': 4,\n 'gamma': 4,\n 
'gamma_threshold': 0.04,\n 'background': (0., 0., 0.),\n 'vibrancy': 1,\n 'highlight_power': -1,\n 'interpolation_type': 'log',\n 'interpolation': 'linear',\n 'palette_mode': 'linear',\n 'oversample': 1,\n 'filter': 0.2,\n 'quality': 100,\n }\n\n def __init__(self, element=None):\n self.xforms = []\n self.final = None\n if element:\n self.from_element(element)\n else:\n for k, v in self._defaults.items():\n setattr(self, k, v)\n self.palette = Palette()\n\n def from_element(self, element):\n for k, v in element.items():\n try:\n if \" \" in v:\n #coefs, for instance, are stored as '# # # # # #' and need\n #need to be stored internally as floats.\n setattr(self, k, map(float, v.split()))\n else:\n #All numerical values should be stored as floats\n setattr(self, k, float(v))\n except ValueError:\n #If conversion to float won't work, keep value as string\n setattr(self, k, v)\n self.name = str(self.name)\n self.scale = self.scale * 100 / self.size[0]\n xml_xforms = element.findall('xform')\n self._numx = len(xml_xforms)\n for xform in xml_xforms:\n self.xforms.append(Xform(self, xform))\n for finalx in element.findall('finalxform'):\n if self.final is not None:\n #TODO: Create ParsingError?\n raise ValueError('More than one Final Xform')\n self.final = Xform(self, finalx)\n self.final.animate = False\n self.palette = Palette(self, element.findall('color'))\n\n def to_element(self):\n element = ET.Element('flame')\n #attributes\n for k, v in self._iter_attributes():\n if isinstance(v, basestring):\n pass\n elif hasattr(v, \"__iter__\"):\n #For tuple and list elements, make a space-delimited string\n v = \" \".join(str(i if i % 1 else int(i)) for i in v)\n else:\n #TODO: Might want to round here to truncate float precision errors\n v = v if v % 1 else int(v)\n element.set(k, v)\n #xforms\n element.extend(xform.to_string() for xform in self.xforms)\n #finalxform\n if self.final:\n element.apend(self.final.to_string())\n #colors\n element.append(self.palette.to_string())\n return element\n\n def to_string(self):\n return ET.tostring(self.to_element())\n\n def get_at(self, i):\n #TODO: Refactor as this is basically to_element\n element = ET.Element('flame')\n for k, v in self._iter_attributes():\n if isinstance(v, basestring):\n pass\n elif hasattr(v, \"__iter__\"):\n v = \" \".join(str(i if i % 1 else int(i)) for i in v)\n else:\n v = v if v % 1 else int(v)\n element.set(k, v)\n element.extend(xform.get_at(i) for xform in self.xforms)\n if self.final != None:\n element.apend(self.final.to_string())\n element.append(self.palette.to_string())\n return element\n\n def copy(self):\n return Flame(ET.fromstring(self.to_string()))\n\n def iter_xforms(self):\n for xform in self.xforms:\n yield xform\n\n def _iter_attributes(self):\n #This returns all writable attributes and the derived attribs\n return itertools.chain(\n (\n ('name', self.name),\n ('size', self.size),\n ('center', self.center),\n ('scale', self.scale * self.width * 0.01),\n ),\n ((k, v) for (k, v) in self.__dict__.iteritems()\\\n if k not in self._never_write)\n )\n\n @property\n def size(self):\n return self.width, self.height\n\n @property\n def center(self):\n return self.x_offset, self.y_offset\n\n\nclass Xform(object):\n _never_write = (\n '_parent',\n 'post',\n 'lfos',\n 'chaos',\n 'xx',\n 'xy',\n 'yx',\n 'yy',\n 'ox',\n 'oy',\n )\n _defaults = {\n 'coefs': (1., 0., 0., 1., 0., 0.),\n 'linear': 1,\n 'color': 0.,\n 'color_speed': 0.5,\n 'opacity': 1.,\n 'weight': 1.,\n }\n\n def __init__(self, parent, element=None):\n self._parent = 
parent\n self.lfos = []\n self.chaos = None\n self.post = None\n\n if element != None:\n self.from_element(element)\n else:\n for k, v in self._defaults.items():\n setattr(self, k, v)\n\n def from_element(self, element):\n for k, v in element.items():\n try:\n if \" \" in v:\n setattr(self, k, float(v))\n else:\n setattr(self, k, float(v))\n except ValueError:\n setattr(self, k, v)\n if element.get('chaos'):\n self.chaos = Chaos(self, element.get('chaos'))\n else:\n self.chaos = None\n if element.get('post'):\n self.post = PostXform(self, element.get('post'))\n else:\n self.post = None\n for lfo in element.findall('lfo'):\n self.lfos.append(LFO(self, lfo))\n\n def to_element(self, print_lfos=True):\n #need the right tag\n if self.isfinal():\n element = ET.Element('finalxform')\n else:\n element = ET.Element('xform')\n #coefs attr\n element.set('coefs', \"{0} {1} {2} {3} {4} {5}\".format(*self.coefs))\n #post attr\n if self.post:\n element.set('post', self.post.to_string())\n #chaos attr\n if self.chaos:\n element.set('chaos', self.chaos.to_string())\n #other attrs\n for k, v in self._iter_attributes():\n if isinstance(v, basestring):\n pass\n elif hasattr(v, \"__iter__\"):\n v = \" \".join(str(i if i % 1 else int(i)) for i in v)\n else:\n v = v if v % 1 else int(v)\n element.set(k, v)\n #lfos if necessary\n if self.lfos and print_lfos:\n for lfo in self.lfos:\n if lfo.is_active():\n element.append(lfo.to_string())\n return element\n\n def to_string(self, print_lfos=True):\n return ET.tostring(self.to_element(print_lfos))\n\n def copy(self):\n return Xform(ET.fromstring(self.to_string()))\n\n def get_at(self, i):\n xform = self.copy()\n if self.lfos:\n for lfo in self.lfos:\n if lfo.target in ['rotate', 'rotate_x', 'rotate_y', 'orbit']:\n method = getattr(xform, lfo.target)\n method(lfo.get_at(i))\n elif lfo.target in ['protate', 'protate_x', 'protate_y', 'porbit']:\n method = getattr(xform, lfo.target[1:])\n method(lfo.get_at(i))\n else:\n setattr(xform, lfo.target, (getattr(xform, lfo.target) + lfo.get_at(i)))\n return xform.to_element(print_lfos=False)\n\n def list_vars(self):\n return [i for i in variation_list if i in self.__dict__]\n\n def iter_lfos(self):\n for lfo in self.lfos:\n yield lfo\n\n def _iter_attributes(self):\n return ((k, v) for (k, v) in self.__dict__.iteritems() if k not in self._never_write)\n\n def isfinal(self):\n return self is self._parent.final\n\n def ispost(self):\n return type(self._parent) == Xform\n\n def add_post(self, p=(1, 0, 0, 1, 0, 0)):\n if not self.post:\n self.post = PostXform(self, post=p)\n else:\n self.post.coefs = p\n\n def add_lfo(self):\n self.lfos.append(LFO(self))\n\n def scale(self, v):\n self.xp = (self.xp[0]*v, self.xp[1])\n self.yp = (self.yp[0]*v, self.yp[1])\n\n def scale_x(self, v):\n self.xp = (self.xp[0]*v, self.xp[1])\n\n def scale_y(self, v):\n self.yp = (self.yp[0]*v, self.yp[1])\n\n def rotate(self, deg):\n self.xp = (self.xp[0], self.xp[1]+deg)\n self.yp = (self.yp[0], self.yp[1]+deg)\n\n def rotate_x(self, deg):\n self.xp = (self.xp[0], self.xp[1]+deg)\n\n def rotate_y(self, deg):\n self.yp = (self.yp[0], self.yp[1]+deg)\n\n def orbit(self, deg, pivot=(0, 0)):\n if pivot == (0, 0):\n self.op = (self.op[0], self.op[1]+deg)\n else:\n self.o = (self.o[0]-self.p[0], self.o[1]-self.p[1])\n self.op = (self.op[0], self.op[1]+deg)\n self.o = (self.o[0]+self.p[0], self.o[1]+self.p[1])\n\n @property\n def index(self):\n if self.isfinal():\n return None\n try:\n return self._parent.xforms.index(self)\n except (AttributeError, 
ValueError):\n return None\n\n @property\n def coefs(self):\n return (self.xx, self.xy, self.yx, self.yy, self.ox, self.oy)\n @coefs.setter\n def coefs(self, value):\n self.xx, self.xy, self.yx, self.yy, self.ox, self.oy = value\n\n @property\n def x(self):\n return self.xx, self.xy\n @x.setter\n def x(self, value):\n self.xx, self.xy = value\n\n @property\n def y(self):\n return self.yx, self.yy\n @y.setter\n def y(self, value):\n self.yx, self.yy = value\n\n @property\n def o(self):\n return self.ox, self.oy\n @o.setter\n def o(self, value):\n self.ox, self.oy = value\n\n @property\n def polars(self):\n return self.xp, self.yp, self.op\n @polars.setter\n def polars(self, value):\n self.xp, self.yp, self.op = value\n\n @property\n def xp(self):\n return polar((self.xx, self.xy))\n @xp.setter\n def xp(self, value):\n self.xx, self.xy = rect(value)\n\n @property\n def yp(self):\n return polar((self.yx, self.yy))\n @yp.setter\n def yp(self, value):\n self.yx, self.yy = rect(value)\n\n @property\n def op(self):\n return polar((self.ox, self.oy))\n @op.setter\n def op(self, value):\n self.ox, self.oy = rect(value)\n\n\nclass PostXform(Xform):\n _default = (1., 0., 0., 1., 0., 0.)\n\n def __init__(self, parent, element=None, post=None):\n self._parent = parent\n if element:\n self.from_element(element)\n elif post:\n self.coefs = post\n else:\n self.coefs = self._default\n\n def from_element(self, element):\n self.coefs = map(float, element.split())\n\n def to_string(self):\n return \"{0} {1} {2} {3} {4} {5}\".format(*self.coefs)\n\n def isactive(self):\n return self.coefs != (1, 0, 0, 1, 0, 1)\n\n\nclass Chaos(object):\n def __init__(self, parent, element=None):\n self._parent = parent\n if element:\n self.from_element(element)\n else:\n self.value = []\n for i in xrange(self._parent._numx):\n self.value.append(1)\n\n def from_element(self, element):\n self.value = map(float, element.split())\n\n def to_string(self):\n rtn = []\n if self.isactive():\n for v in self.value:\n rtn.extend([str(v), ' '])\n rtn.pop()\n return ''.join(rtn)\n \n def isactive(self):\n for v in self.value:\n if v != 1:\n return True\n return False\n\n\nclass LFO(object):\n _defaults = {\n 'target': None,\n 'freq': 1,\n 'shape': 'sin',\n 'amp': 0,\n 'phase': 0,\n }\n #TODO: generate from inspecting lfos?\n _shapes = [\n 'sin',\n 'saw_up',\n 'saw_down',\n 'square',\n 'triangle'\n ]\n _valid_targets = set.union(\n set(variation_list),\n set(variable_list),\n set([\n 'color',\n 'color_speed',\n 'weight',\n 'opacity',\n 'rotate',\n 'rotate_x',\n 'rotate_y',\n 'protate',\n 'protate_x',\n 'protate_y',\n 'orbit',\n 'porbit',\n ]))\n\n def __init__(self, parent, element=None):\n self._parent = parent\n if element:\n self.from_element(element)\n else:\n for (k, v) in self._defaults.items():\n setattr(self, k, v)\n\n def from_element(self, element):\n self.target = element.get('target', self._defaults['target'])\n self.freq = float(element.get('freq', self._defaults['freq']))\n self.shape = element.get('shape', self._defaults['shape'])\n self.amp = float(element.get('amp', self._defaults['amp']))\n self.phase = float(element.get('phase', self._defaults['phase']))\n\n def to_element(self):\n if self.isactive():\n element = ET.Element('lfo')\n for (k, v) in self._dict__.iteritems():\n element.set(k, v)\n return element\n return None\n\n def to_string(self):\n return ET.tostring(self.to_element())\n\n def get_at(self, i):\n if self.isactive():\n method = getattr(lfos, self._shape)\n return method(i*self.freq, self.amp, self.phase)\n 
else:\n return 0.\n\n @property\n def target(self):\n return self._target\n @target.setter\n def target(self, value):\n if value in self._valid_targets:\n self._target = value\n try:\n self._targetp = self._parent.__dict__[self.target]\n except KeyError:\n setattr(self._parent, value, 0.)\n else:\n raise ValueError('{0} is an invalid target'.format(value))\n\n @property\n def shape(self):\n return self._shape\n @shape.setter\n def shape(self, value):\n if value in self.shapes:\n self._shape = value\n else:\n raise ValueError('Invalid shape')\n\n\nclass Palette(object):\n def __init__(self, parent, element=None):\n self._parent = parent\n self.colors = []\n for i in xrange(256):\n self.colors.append(Color(i, self._parent))\n if element:\n self.from_element(element)\n\n def from_element(self, element):\n for color in element:\n self.colors[color._index] = Color(self._parent, element=element)\n\n def to_elements(self):\n return [color.to_element() for color in self.colors]\n\n\nclass Color(object):\n def __init__(self, parent, index=None, element=None):\n self._parent = parent\n self._index = index\n if element:\n self.from_element(element)\n else:\n self._color = (0, 0, 0)\n\n def from_element(self, element):\n self._index = int(element.get('index'))\n self.rgb = map(float, element.get('rgb', '0 0 0').split())\n\n def to_element(self, element):\n element = ET.Element('color')\n element.set('index', self._index)\n element.set('rgb', '{0} {1} {2}'.format(*self.rgb))\n return element\n\n @property\n def index(self):\n try:\n return self._parent.index(self)\n except (AttributeError, ValueError):\n return None\n\n @property\n def rgb(self):\n return self._color\n @rgb.setter\n def rgb(self, value):\n self._color = value\n\n @property\n def r(self):\n return self._color[0]\n @r.setter\n def r(self, value):\n self._color = value, self.g, self.b\n\n @property\n def g(self):\n return self._color[1]\n @g.setter\n def g(self, value):\n self._color = self.r, value, self.b\n\n @property\n def b(self):\n return self._color[2]\n @b.setter\n def b(self, value):\n self._color = self.r, self.g, value\n\n @property\n def hsv(self):\n return colorsys.rgb_to_hsv(*(x/255. 
for x in self.rgb))\n @hsv.setter\n def hsv(self, value):\n self._color = colorsys.hsv_to_rgb(*value)\n\n @property\n def h(self):\n return self.hsv[0]\n @h.setter\n def h(self, value):\n self._color = colorsys.hsv_to_rgb(value, self.s, self.v)\n\n @property\n def s(self):\n return self.hsv[1]\n @s.setter\n def s(self, value):\n self._color = colorsys.hsv_to_rgb(self.h, value, self.v)\n\n @property\n def v(self):\n return self.hsv[2]\n @v.setter\n def v(self, value):\n self._color = colorsys.hsv_to_rgb(self.h, self.s, value)\n\n","sub_path":"flame.py","file_name":"flame.py","file_ext":"py","file_size_in_byte":18822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"245473625","text":"import os\nimport sys\nimport logging\nimport gzip\nimport shutil\nimport requests\nimport numpy as np\nfrom easyquery import Query\nfrom astropy.coordinates import search_around_sky, SkyCoord\nfrom astropy.units import Quantity\n\nSPEED_OF_LIGHT = 299792.458 # in km/s\n\n\ndef get_empty_str_array(array_length, string_length=48):\n return np.chararray((array_length,), itemsize=string_length, unicode=False)\n\n\ndef get_logger(level='WARNING'):\n log = logging.getLogger()\n log.setLevel(level if isinstance(level, int) else getattr(logging, level))\n logFormatter = logging.Formatter('[%(levelname)-5.5s][%(asctime)s] %(message)s')\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logFormatter)\n log.addHandler(consoleHandler)\n return log\n\n\ndef get_decals_viewer_image(ra, dec, pixscale=0.2, layer='sdssco', size=256, out=None):\n url = 'http://legacysurvey.org/viewer-dev/jpeg-cutout/?ra={ra}&dec={dec}&pixscale={pixscale}&layer={layer}&size={size}'.format(**locals())\n content = requests.get(url).content\n if out is not None:\n if not out.lower().endswith('.jpg'):\n out += '.jpg'\n with open(out, 'wb') as f:\n f.write(content)\n return content\n\n\ndef gzip_compress(path, out_path=None, delete_original=True):\n if out_path is None:\n out_path = path + '.gz'\n\n with open(path, 'rb') as f_in, gzip.open(out_path, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n if delete_original:\n os.unlink(path)\n\n return out_path\n\n\ndef join_table_by_coordinates(table, table_to_join,\n columns_to_join=None, columns_to_rename=None,\n max_distance=1.0/3600.0, missing_value=np.nan,\n table_ra_name='RA', table_dec_name='DEC',\n table_to_join_ra_name='RA',\n table_to_join_dec_name='DEC', unit='deg'):\n \"\"\"\n join two table by matching the sky coordinates\n\n Examples\n --------\n wise_cols = ('W1_MAG', 'W1_MAG_ERR', 'W2_MAG', 'W2_MAG_ERR')\n cols_rename = {'W1_MAG':'W1', 'W1_MAG_ERR':'W1ERR', 'W2_MAG':'W2', 'W2_MAG_ERR':'W2ERR'}\n join_table_by_coordinates(base, wise, wise_cols, cols_rename)\n \"\"\"\n\n t1 = table\n t2 = table_to_join\n\n ra1 = table_ra_name\n dec1 = table_dec_name\n ra2 = table_to_join_ra_name\n dec2 = table_to_join_dec_name\n\n idx1, idx2 = search_around_sky(SkyCoord(t1[ra1], t1[dec1], unit=unit),\n SkyCoord(t1[ra1], t1[dec1], unit=unit),\n Quantity(max_distance, unit=unit))[:2]\n\n n_matched = len(idx1)\n\n if n_matched:\n if columns_to_join is None:\n columns_to_join = t2.colnames\n\n if columns_to_rename is None:\n columns_to_rename = dict()\n\n if isinstance(missing_value, dict):\n missing_value_dict = missing_value\n missing_value = np.nan\n else:\n missing_value_dict = dict()\n\n for c2 in columns_to_join:\n c1 = columns_to_rename.get(c2, c2)\n if c1 not in t1:\n t1[c1] = missing_value_dict.get(c1, 
missing_value)\n t1[c1][idx1] = t2[c2][idx2]\n\n return n_matched\n\n\ndef fill_values_by_query(table, query, values_to_fill):\n \"\"\"\n\n Examples\n --------\n fill_values_by_query(table, 'OBJID == 1237668367995568266',\n {'SPEC_Z': 0.21068, 'TELNAME':'SDSS', 'MASKNAME':'SDSS'})\n \"\"\"\n mask = Query(query).mask(table)\n n_matched = np.count_nonzero(mask)\n\n if n_matched:\n for c, v in values_to_fill.items():\n table[c][mask] = v\n\n return n_matched\n","sub_path":"SAGA/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"170119861","text":"import os\n\nimport pytest\n\nfrom app import app\nfrom tests import helpers\nimport config\n\ndata_set = [\n ('postMessage.js', '200 OK', 'application/javascript', None, None, None),\n ('PostmeSSage.js', '200 OK', 'application/javascript', {'unexpected_param': 'unexpected'}, None, None),\n ('PostmeSSage.js', '200 OK', 'application/javascript', None, {'Content-Type': 'image/x-icon', 'charset': 'ISO-8859-1'}, None),\n ('PostmeSSage.js', '200 OK', 'application/javascript', None, None, {'Surprise': \"
HELLO
\"}),\n (u'PostmeSSage.js', '200 OK', 'application/javascript', None, None, None),\n (bytes('PostmeSSage.js'.encode('latin')), '200 OK', 'application/javascript', None, None, None),\n ('postMessage.js#', '200 OK', 'application/javascript', None, None, None),\n\n ('postMessage.js ', '404 NOT FOUND', 'text/html', None, None, None),\n ('postMessage.js/', '404 NOT FOUND', 'text/html', None, None, None),\n ('nonExisting.js', '404 NOT FOUND', 'text/html', None, None, None),\n ('postMessage,js', '404 NOT FOUND', 'text/html', None, None, None),\n ('postMessage..js', '404 NOT FOUND', 'text/html', None, None, None),\n ('postMessage;js', '404 NOT FOUND', 'text/html', None, None, None),\n ('js/postMessage.js', '404 NOT FOUND', 'text/html', None, None, None),\n (r'js\\postMessage.js', '404 NOT FOUND', 'text/html', None, None, None),\n]\n\n\ndef get_expected_file_length(file_name):\n if isinstance(file_name, bytes):\n file_name = file_name.decode('latin')\n\n file_name = file_name.replace('#', '')\n\n js_file_path = os.path.join(helpers.get_server_path(), config.PATH_JS, file_name)\n\n with open(js_file_path, 'rb') as f:\n js_file = f.read()\n return len(js_file)\n\n\n@pytest.mark.parametrize('path, expected_status, expected_mime_type, query_string, headers, data', data_set)\ndef test_send_js(path, expected_status, expected_mime_type, query_string, headers, data):\n\n kwargs = {k: v for k, v in locals().items() if k not in ['expected_status', 'expected_mime_type'] and v is not None}\n client = app.test_client()\n\n print('Test GET', kwargs)\n response = client.get(**kwargs)\n\n assert response.status == expected_status\n assert response.mimetype == expected_mime_type\n assert response.charset == 'utf-8'\n assert response.content_length == len(response.data)\n\n if expected_status == '200 OK':\n assert get_expected_file_length(kwargs['path']) == len(response.data)\n","sub_path":"tests/unit/test_send_js.py","file_name":"test_send_js.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"604572767","text":"import math\n\nb=[0,1,2,3,4,5,6,7,8]\na=[0,5,2,5,4,3,1,6,3]\nl=len(a)\ndp=[[] for i in range(l)]\nfor i in range(l):\n dp[i].append(a[i])\n minlast=a[i]\n j=1\n while i+2**j-1 16)):\n print('Stefan has gone out into the darkness.')\n return False\n sx, sy = (stephan - 1) % 4, (stephan - 1) // 4\n ghost_dirs = [ch for ch in \"NWES\" if ch not in house[ghost - 1]]\n if ghost % 4 == 1 and \"W\" in ghost_dirs:\n ghost_dirs.remove(\"W\")\n if not ghost % 4 and \"E\" in ghost_dirs:\n ghost_dirs.remove(\"E\")\n if ghost <= 4 and \"N\" in ghost_dirs:\n ghost_dirs.remove(\"N\")\n if ghost > 12 and \"S\" in ghost_dirs:\n ghost_dirs.remove(\"S\")\n\n ghost_dir, ghost_dist = \"\", 1000\n for d in ghost_dirs:\n new_ghost = ghost + DIRS[d]\n gx, gy = (new_ghost - 1) % 4, (new_ghost - 1) // 4\n dist = (gx - sx) ** 2 + (gy - sy) ** 2\n if ghost_dist > dist:\n ghost_dir, ghost_dist = d, dist\n elif ghost_dist == dist:\n ghost_dir += d\n ghost_move = choice(ghost_dir)\n ghost += DIRS[ghost_move]\n if ghost == stephan:\n print('The ghost caught Stephan.')\n return False\n print(\"Too many moves.\")\n return False\n\n assert check_solution(checkio,\n [\"\", \"S\", \"S\", \"\",\n \"E\", \"NW\", \"NS\", \"\",\n \"E\", \"WS\", \"NS\", \"\",\n \"\", \"N\", \"N\", \"\"]), \"1st example\"\n assert check_solution(checkio,\n [\"\", \"\", \"\", \"\",\n \"E\", \"ESW\", \"ESW\", \"W\",\n \"E\", \"ENW\", \"ENW\", \"W\",\n \"\", \"\", \"\", 
\"\"]), \"2nd example\"\n","sub_path":"node_graph.py","file_name":"node_graph.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"181536887","text":"\"\"\"\nQuasi-inverse matrices for Tau and Galerkin methods\n\n@article{julien09,\ntitle = {Efficient multi-dimensional solution of PDEs using Chebyshev spectral methods},\njournal = {Journal of Computational Physics},\nvolume = {228},\nnumber = {5},\npages = {1480-1503},\nyear = {2009},\nissn = {0021-9991},\ndoi = {https://doi.org/10.1016/j.jcp.2008.10.043}\n}\n\"\"\"\nimport numpy as np\nfrom shenfun.matrixbase import SparseMatrix\n\n__all__ = ('QIGmat', 'QITmat')\n\nclass QImat(SparseMatrix):\n\n    def __mul__(self, y):\n        \"\"\"Returns copy of self.__mul__(y) <==> self*y\"\"\"\n        if isinstance(y, SparseMatrix):\n            return y.__quasi__(self)\n        return SparseMatrix.__mul__(self, y)\n\nclass QIGmat(QImat):\n    \"\"\"Quasi-inverse matrix for the Galerkin method\n\n    Parameters\n    ----------\n    N : int\n        The number of quadrature points\n\n    \"\"\"\n    def __init__(self, N):\n        k = np.arange(N)\n        d = {\n            0: 1/4/(k[2:]*(k[2:]-1)),\n            2: -1/2/(k[2:-2]**2-1),\n            4: 1/4/(k[2:-4]*(k[2:-4]+1))}\n        SparseMatrix.__init__(self, d, (N-2, N-2))\n\nclass QITmat(QImat):\n    \"\"\"Quasi-inverse matrix for the Tau method\n\n    Parameters\n    ----------\n    N : int\n        The number of quadrature points\n\n    \"\"\"\n    def __init__(self, N):\n        k = np.arange(N)\n        d = {\n            -2: 1/4/(k[2:]*(k[2:]-1)),\n            0: np.zeros(N),\n            2: np.zeros(N-2)}\n        d[0][2:-2] = -1/2/(k[2:-2]**2-1)\n        d[2][2:-2] = 1/4/(k[2:-4]*(k[2:-4]+1))\n        SparseMatrix.__init__(self, d, (N, N))\n","sub_path":"shenfun/chebyshev/quasi.py","file_name":"quasi.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"253690199","text":"# Utility for international phone numbers. \n# Functionality includes formatting, parsing and validation.\n# (based on the Java implementation)\n#\n# @author Teren Teh (teren.teh@gmail.com)\n#\n\n# import statements to require the various pieces\n# including the proto buffer SDK\nimport logging\n\nclass PhoneNumberUtil:\n    # Constants\n\n    # Class attributes\n    # The minimum and maximum length of the national significant number.\n    _MIN_LENGTH_FOR_NSN = 3\n    _MAX_LENGTH_FOR_NSN = 15\n\n    # This is the file prefix to the proto buffer\n    META_DATA_FILE_PREFIX = \"/com/google/i18n/phonenumbers/data/PhoneNumberMetadataProto\"\n\n    # Class name that maps country codes to region\n    COUNTRY_CODE_TO_REGION_CODE_MAP_CLASS_NAME = \"CountryCodeToRegionCodeMap\"\n\n    currentFilePrefix = META_DATA_FILE_PREFIX\n\n    # Used for logging purposes\n    _LOGGER = logging.getLogger('PhoneNumberUtilLogger')\n\n    # Mapping from a country code to the region codes which denote the country/region\n    # represented by that country code. \n    # When there are multiple countries sharing a country code, the one indicated with\n    # \"isMainCountryForCode\" in the metadata should be first.\n    countryCodeToRegionCodeMap = {} \n\n    # Set of countries that the library supports\n    supportedCountries = {}\n\n    # Set of countries that share country code 1\n    nanpaCountries = {}\n\n    _NANPA_COUNTRY_CODE = 1\n\n    # The PLUS_SIGN signifies the international prefix\n    PLUS_SIGN = '+'\n\n    # These mappings map a character (key) to a specific digit that should replace it for\n    # normalization purposes. 
Non-European digits that may be used in phone numbers are\n    # mapped to a European equivalent.\n    DIGIT_MAPPINGS = {}\n    DIGIT_MAPPINGS['0'] = '0'\n    DIGIT_MAPPINGS['\\uFF10'] = '0' # Fullwidth digit 0\n    DIGIT_MAPPINGS['\\u0660'] = '0' # Arabic-indic digit 0\n    DIGIT_MAPPINGS['1'] = '1'\n    DIGIT_MAPPINGS['\\uFF11'] = '1' # Fullwidth digit 1\n    DIGIT_MAPPINGS['\\u0661'] = '1' # Arabic-indic digit 1\n    DIGIT_MAPPINGS['2'] = '2'\n    DIGIT_MAPPINGS['\\uFF12'] = '2' # Fullwidth digit 2\n    DIGIT_MAPPINGS['\\u0662'] = '2' # Arabic-indic digit 2\n    DIGIT_MAPPINGS['3'] = '3'\n    DIGIT_MAPPINGS['\\uFF13'] = '3' # Fullwidth digit 3\n    DIGIT_MAPPINGS['\\u0663'] = '3' # Arabic-indic digit 3\n    DIGIT_MAPPINGS['4'] = '4'\n    DIGIT_MAPPINGS['\\uFF14'] = '4' # Fullwidth digit 4\n    DIGIT_MAPPINGS['\\u0664'] = '4' # Arabic-indic digit 4\n    DIGIT_MAPPINGS['5'] = '5'\n    DIGIT_MAPPINGS['\\uFF15'] = '5' # Fullwidth digit 5\n    DIGIT_MAPPINGS['\\u0665'] = '5' # Arabic-indic digit 5\n    DIGIT_MAPPINGS['6'] = '6'\n    DIGIT_MAPPINGS['\\uFF16'] = '6' # Fullwidth digit 6\n    DIGIT_MAPPINGS['\\u0666'] = '6' # Arabic-indic digit 6\n    DIGIT_MAPPINGS['7'] = '7'\n    DIGIT_MAPPINGS['\\uFF17'] = '7' # Fullwidth digit 7\n    DIGIT_MAPPINGS['\\u0667'] = '7' # Arabic-indic digit 7\n    DIGIT_MAPPINGS['8'] = '8'\n    DIGIT_MAPPINGS['\\uFF18'] = '8' # Fullwidth digit 8\n    DIGIT_MAPPINGS['\\u0668'] = '8' # Arabic-indic digit 8\n    DIGIT_MAPPINGS['9'] = '9'\n    DIGIT_MAPPINGS['\\uFF19'] = '9' # Fullwidth digit 9\n    DIGIT_MAPPINGS['\\u0669'] = '9' # Arabic-indic digit 9\n\n    # Only upper-case variants of alpha characters are stored. This map is used for\n    # converting letter-based numbers to their number equivalent. \n    # e.g. 1-800-GOOGLE1 = 1-800-4664531\n    _ALPHA_MAPPINGS = {}\n    _ALPHA_MAPPINGS['A'] = '2'\n    _ALPHA_MAPPINGS['B'] = '2'\n    _ALPHA_MAPPINGS['C'] = '2'\n    _ALPHA_MAPPINGS['D'] = '3'\n    _ALPHA_MAPPINGS['E'] = '3'\n    _ALPHA_MAPPINGS['F'] = '3'\n    _ALPHA_MAPPINGS['G'] = '4'\n    _ALPHA_MAPPINGS['H'] = '4'\n    _ALPHA_MAPPINGS['I'] = '4'\n    _ALPHA_MAPPINGS['J'] = '5'\n    _ALPHA_MAPPINGS['K'] = '5'\n    _ALPHA_MAPPINGS['L'] = '5'\n    _ALPHA_MAPPINGS['M'] = '6'\n    _ALPHA_MAPPINGS['N'] = '6'\n    _ALPHA_MAPPINGS['O'] = '6'\n    _ALPHA_MAPPINGS['P'] = '7'\n    _ALPHA_MAPPINGS['Q'] = '7'\n    _ALPHA_MAPPINGS['R'] = '7'\n    _ALPHA_MAPPINGS['S'] = '7'\n    _ALPHA_MAPPINGS['T'] = '8'\n    _ALPHA_MAPPINGS['U'] = '8'\n    _ALPHA_MAPPINGS['V'] = '8'\n    _ALPHA_MAPPINGS['W'] = '9'\n    _ALPHA_MAPPINGS['X'] = '9'\n    _ALPHA_MAPPINGS['Y'] = '9'\n    _ALPHA_MAPPINGS['Z'] = '9'\n\n    _ALL_NORMALIZATION_MAPPINGS = dict(list(DIGIT_MAPPINGS.items()) + list(_ALPHA_MAPPINGS.items()))\n\n    # A list of all country codes where national significant numbers (excluding any national prefix)\n    # exist that start with a leading zero.\n    LEADING_ZERO_COUNTRIES = frozenset([\n        39, # Italy\n        47, # Norway\n        225, # Cote d'Ivoire\n        227, # Niger\n        228, # Togo\n        241, # Gabon\n        379, # Vatican City\n        ])\n","sub_path":"python/src/i18n/phonenumbers/phonenumberutil.py","file_name":"phonenumberutil.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"133089158","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport os\nimport sys\nfrom pdfreactor.api import *\n\nfrom time import localtime, strftime, sleep\n\n# The content to render\nfileHandle = open(os.path.abspath(os.path.join(os.path.dirname(__file__), '../resources/contentPython.html')))\ncontent = fileHandle.read()\n\n# Create new PDFreactor instance\n# pdfReactor = 
PDFreactor(\"http://yourServer:9423/service/rest\")\npdfReactor = PDFreactor()\n\n# Creates today's date\ndate = strftime(\"%m/%d/%Y %H:%M:%S %p\", localtime())\n\n# Get base URL path\npath = os.getenv('REQUEST_URI')\n\n# If the environment variable was not found\nif not path:\n # try this one:\n path = os.environ['PATH_INFO']\n\n# Create a new PDFreactor configuration object\nconfig = {\n # Specify the input document\n 'document': content,\n # Set a base URL for images, style sheets, links\n 'baseURL': \"http://\" + os.getenv(\"HTTP_HOST\") + path,\n # Set an appropriate log level\n 'logLevel': PDFreactor.LogLevel.WARN,\n # Set the title of the created PDF\n 'title': \"Demonstration of the PDFreactor Python API\",\n # Set the author of the created PDF\n 'author': \"Myself\",\n # Enable links in the PDF document\n 'addLinks': True,\n # Enable bookmarks in the PDF document\n 'addBookmarks': True,\n # Set some viewer preferences\n 'viewerPreferences': [\n PDFreactor.ViewerPreferences.FIT_WINDOW,\n PDFreactor.ViewerPreferences.PAGE_MODE_USE_THUMBS\n ],\n # Add user style sheets\n 'userStyleSheets': [\n {\n 'content': \"@page {\"\n \"@top-center {\"\n \"content: 'PDFreactor Python API demonstration';\"\n \"}\"\n \"@bottom-center {\"\n \"content: 'Created on \" + date + \"';\"\n \"}\"\n \"}\"\n },\n {'uri': \"../../resources/common.css\"}\n ]\n}\n\ntry:\n # Convert document\n documentId = pdfReactor.convertAsync(config)\n\n while True:\n sleep(0.5)\n progress = pdfReactor.getProgress(documentId)\n\n if progress['finished']:\n break\n\n # Used to prevent newlines are converted to Windows newlines (\\n --> \\r\\n)\n # when using Python on Windows systems\n if sys.platform == \"win32\":\n import msvcrt\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n\n print(\"Content-Type: application/pdf\\n\")\n\n # Streaming is more efficient for larger documents\n pdfReactor.getDocumentAsBinary(documentId, sys.stdout)\nexcept Exception as e:\n # Not successful, print error and log\n print(\"Content-type: text/html\\n\\n\")\n print(\"
<h1>An Error Has Occurred</h1>\")\n    print(\"<h2>\" + str(e) + \"</h2>
\")\n","sub_path":"docs/sample/async.py","file_name":"async.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"529579765","text":"# -*- coding: utf-8 -*-\n\nfrom django.contrib import admin\nfrom models import Page, ContentPage\n\n\nclass PageAdmin(admin.ModelAdmin):\n fieldsets = [\n (u\"Метаинформация\", {\n \"classes\": (\"grp-collapse grp-closed\",),\n \"fields\": [\n \"meta_description\", \"meta_keywords\", \"meta_title\"\n ],\n }),\n ]\n\n\nclass ContentPageAdmin(PageAdmin):\n fieldsets = [\n (u\"Новая страница\", {\n \"fields\": [\n \"title\", \"body\", \"template\", \"is_public\",\n ],\n }),\n ]\n fieldsets.insert(0, PageAdmin.fieldsets[0])\n list_display = (\"id\", \"title\", \"is_public\",)\n list_display_links = (\"id\", \"title\",)\n list_editable = (\"is_public\",)\n\n\nadmin.site.register(Page, PageAdmin)\nadmin.site.register(ContentPage, ContentPageAdmin)\n\n# EOF\n","sub_path":"apps/pages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"84360856","text":"#coding:utf8\n\n# Copyright 2019 longpeng2008. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# If you find any problem,please contact us\n#\n# longpeng2008to2012@gmail.com \n#\n# or create issues\n# =============================================================================\nfrom __future__ import print_function, division\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport time\nimport os\nfrom net import simpleNet5\nfrom dataset import SegDataset\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader\nimport numpy as np\n\nwriter = SummaryWriter() #可视化\n\nbatchsize = 64\nepochs = 200\nimagesize = 256 #缩放图片大小\ncropsize = 224 #训练图片大小\ntrain_data_path = 'data/train.txt' #训练数��集\nval_data_path = 'data/val.txt' #验证数据集\n\n# 数据预处理\ndata_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])])\n\n\n# 图像分割数据集\ntrain_dataset = SegDataset(train_data_path,imagesize,cropsize,data_transform)\ntrain_dataloader = DataLoader(train_dataset, batch_size=batchsize, shuffle=True)\nval_dataset = SegDataset(val_data_path,imagesize,cropsize,data_transform)\nval_dataloader = DataLoader(val_dataset, batch_size=val_dataset.__len__(), shuffle=True)\n\nimage_datasets = {}\nimage_datasets['train'] = train_dataset\nimage_datasets['val'] = val_dataset\ndataloaders = {}\ndataloaders['train'] = train_dataloader\ndataloaders['val'] = val_dataloader\n\n# 定义网络,优化目标,优化方法\ndevice = torch.device('cpu')\nnet = simpleNet5().to(device)\ncriterion = nn.CrossEntropyLoss() #使用softmax loss损失,输入label是图片\noptimizer = optim.SGD(net.parameters(), lr=1e-1, momentum=0.9)\nscheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1) #每50个epoch,学习率衰减\n\nif not os.path.exists('checkpoints'):\n os.mkdir('checkpoints')\n\nfor epoch in range(1, epochs+1):\n print('Epoch {}/{}'.format(epoch, epochs - 1))\n for phase in ['train', 'val']:\n if phase == 'train':\n scheduler.step()\n net.train(True) # Set model to training mode\n else:\n net.train(False) # Set model to evaluate mode\n\n running_loss = 0.0\n running_accs = 0.0\n\n n = 0\n for data in 
dataloaders[phase]:\n            imgs, labels = data\n            img, label = imgs.to(device).float(), labels.to(device).float()\n            output = net(img)\n            loss = criterion(output, label.long()) # compute the loss\n\n            output_mask = output.cpu().data.numpy().copy()\n            output_mask = np.argmax(output_mask, axis=1)\n            y_mask = label.cpu().data.numpy().copy()\n            acc = (output_mask == y_mask) # compute the accuracy\n            acc = acc.mean()\n\n            optimizer.zero_grad()\n            if phase == 'train':\n                # zero gradients, backpropagate, update parameters\n                loss.backward()\n                optimizer.step()\n\n            running_loss += loss.data.item()\n            running_accs += acc\n            n += 1\n\n        epoch_loss = running_loss / n\n        epoch_acc = running_accs / n\n\n        if phase == 'train':\n            writer.add_scalar('data/trainloss', epoch_loss, epoch)\n            writer.add_scalar('data/trainacc', epoch_acc, epoch)\n            print('train epoch_{} loss={}'.format(epoch, epoch_loss))\n            print('train epoch_{} acc={}'.format(epoch, epoch_acc))\n        else:\n            writer.add_scalar('data/valloss', epoch_loss, epoch)\n            writer.add_scalar('data/valacc', epoch_acc, epoch)\n            print('val epoch_{} loss={}'.format(epoch, epoch_loss))\n            print('val epoch_{} acc={}'.format(epoch, epoch_acc))\n\n\n    if epoch % 10 == 0:\n\n        torch.save(net, 'checkpoints/model_epoch_{}.pth'.format(epoch))\n        print('checkpoints/model_epoch_{}.pth saved!'.format(epoch))\n\nwriter.export_scalars_to_json(\"./all_scalars.json\")\nwriter.close()\n","sub_path":"computer_vision/projects/segmentation/pytorch/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"316193350","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"This module contains the handler for the 'http_echo' skill.\"\"\"\n\nimport json\nfrom typing import cast\n\nfrom aea.protocols.base import Message\nfrom aea.skills.base import Handler\n\nfrom packages.fetchai.protocols.http.message import HttpMessage\n\n\nclass HttpHandler(Handler):\n    \"\"\"This implements the echo handler.\"\"\"\n\n    SUPPORTED_PROTOCOL = HttpMessage.protocol_id\n\n    def setup(self) -> None:\n        \"\"\"\n        Implement the setup.\n\n        :return: None\n        \"\"\"\n        pass\n\n    def handle(self, message: Message) -> None:\n        \"\"\"\n        Implement the reaction to an envelope.\n\n        :param message: the message\n        :return: None\n        \"\"\"\n        http_msg = cast(HttpMessage, message)\n        if http_msg.performative == HttpMessage.Performative.REQUEST:\n            self.context.logger.info(\n                \"[{}] received http request with method={}, url={} and body={!r}\".format(\n                    self.context.agent_name,\n                    http_msg.method,\n                    http_msg.url,\n                    http_msg.bodyy,\n                )\n            )\n            if http_msg.method == \"get\":\n                self._handle_get(http_msg)\n            elif http_msg.method == \"post\":\n                self._handle_post(http_msg)\n        else:\n            self.context.logger.info(\n                \"[{}] received response ({}) unexpectedly!\".format(\n                    
self.context.agent_name, http_msg\n )\n )\n\n def _handle_get(self, http_msg: HttpMessage) -> None:\n \"\"\"\n Handle a Http request of verb GET.\n\n :param http_msg: the http message\n :return: None\n \"\"\"\n http_response = HttpMessage(\n dialogue_reference=http_msg.dialogue_reference,\n target=http_msg.message_id,\n message_id=http_msg.message_id + 1,\n performative=HttpMessage.Performative.RESPONSE,\n version=http_msg.version,\n status_code=200,\n status_text=\"Success\",\n headers=http_msg.headers,\n bodyy=json.dumps({\"tom\": {\"type\": \"cat\", \"age\": 10}}).encode(\"utf-8\"),\n )\n self.context.logger.info(\n \"[{}] responding with: {}\".format(self.context.agent_name, http_response)\n )\n http_response.counterparty = http_msg.counterparty\n self.context.outbox.put_message(message=http_response)\n\n def _handle_post(self, http_msg: HttpMessage) -> None:\n \"\"\"\n Handle a Http request of verb POST.\n\n :param http_msg: the http message\n :return: None\n \"\"\"\n http_response = HttpMessage(\n dialogue_reference=http_msg.dialogue_reference,\n target=http_msg.message_id,\n message_id=http_msg.message_id + 1,\n performative=HttpMessage.Performative.RESPONSE,\n version=http_msg.version,\n status_code=200,\n status_text=\"Success\",\n headers=http_msg.headers,\n bodyy=b\"\",\n )\n self.context.logger.info(\n \"[{}] responding with: {}\".format(self.context.agent_name, http_response)\n )\n http_response.counterparty = http_msg.counterparty\n self.context.outbox.put_message(message=http_response)\n\n def teardown(self) -> None:\n \"\"\"\n Implement the handler teardown.\n\n :return: None\n \"\"\"\n pass\n","sub_path":"packages/fetchai/skills/http_echo/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"402417323","text":"#!/usr/bin/python3\n\nimport socket\nimport select\nimport sys\nimport struct\nimport datetime\nimport json\n\nimport clientsession\nimport http_server\n\ncs = None\n\nclass ControlServer:\n def __init__(self):\n self.running = False\n self.read_s = [] #All the sockets control server is listening on - but not http\n self.session_list = [] #Established sessions\n self.http = None #a webserver\n try:\n self.listen_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.listen_s.bind((\"0.0.0.0\", 12345))\n self.listen_s.listen(5)\n print(\"Server is running at {}\".format(self.listen_s.getsockname()))\n self.running = True\n #Add server socket to list of readables\n self.read_s.append(self.listen_s)\n \n except OSError as e:\n print(\"Could not bind server\")\n print(\"{}\".format(e))\n self.listen_s.close()\n \n #main loop that does the listening\n def main_loop(self):\n\n while(self.running):\n try:\n r, w, ex = select.select(self.read_s + self.http.read_s, [], [])\n except KeyboardInterrupt:\n self.shutdown()\n\n for s in r:\n #Control Server listen socket has new connection\n if(s == self.listen_s):\n (client_s, address) = self.listen_s.accept()\n new_session = clientsession.ClientSession(client_s, address)\n self.session_list.append(new_session)\n self.read_s.append(client_s)\n\n #Webserver listen socket has new connection \n elif(s == self.http.listen_s):\n print(\"http\")\n (client_s, address) = self.http.listen_s.accept()\n self.http.read_s.append(client_s)\n print(\"HTTP: New connection from {}:{}\".format(address[0], address[1]))\n\n #Webserver listen socket has read data, so pass onto webserver\n elif(s in 
self.http.read_s):\n data = s.recv(1024)\n if(data):\n self.http.handle(data, s)\n else:\n print(\"HTTP session closed\")\n self.http.read_s.remove(s)\n\n #Otherwise one of the connected sockets has read data\n elif(s in self.read_s):\n cur_session = getSession(s)\n \n #if session doesn't exist, remove socket from read list\n if(cur_session == None):\n self.read_s.remove(s)\n s.close()\n continue\n\n try:\n i = 0\n data = s.recv(1024)\n if(data):\n #A single recv may return multiple packets stuck together as \n #this is a streaming socket. Packets are prefixed with their\n #length as an unsigned short. So extract that msglen, and \n #differentiate between individual messages\n while(i < len(data)):\n msglen = struct.unpack(\">H\", data[i:i+2])[0]\n i += 2\n handleRequest(data[i:i+msglen], cur_session)\n i += msglen\n\n #receiving no data means socket is closed\n else:\n closeSession(s)\n \n except socket.error:\n closeSession(s)\n\n\n #Attach a webserver which can display info\n def attachWebserver(self, webserver):\n self.http = webserver\n self.read_s.append(webserver.listen_s)\n\n #Shutdown the server\n def shutdown(self):\n print(\"Shutting down server, closing connections\")\n self.listen_s.close()\n self.http.listen_s.close()\n sys.exit()\n\n#pass in a socket, searches control server's session list and returns session \ndef getSession(s):\n for session in cs.session_list:\n if(s == session.sock):\n return(session)\n return(None)\n\n#clean up code to run when session exits\ndef closeSession(s):\n cur_session = getSession(s)\n if(cur_session is not None):\n print(\"Session {}:{} ended\".format(cur_session.addr[0], cur_session.addr[1]))\n cs.session_list.remove(cur_session)\n s.close()\n cs.read_s.remove(s)\n\n#When data successfully received from a session socket\ndef handleRequest(d, cur_session):\n d = d.decode(\"utf-8\")\n\n #debugging stuff\n if(cur_session.name is not None):\n display_name = \"[{}]\".format(cur_session.name)\n else:\n display_name = \"[{}:{}]\".format(cur_session.addr[0], cur_session.addr[1])\n\n #decod json and look up what action it is\n packet = json.loads(d)\n action = packet[\"action\"]\n\n if(action == \"register\"):\n cur_session.name = packet[\"name\"]\n print(\"{}: Registered as {}\".format(display_name, cur_session.name))\n\n #location is sent as a LIST of TUPLES (float(x), float(y), int(unixtime))\n elif(action == \"location\"):\n loc_list = packet[\"coordinates\"]\n cur_session.location = loc_list[0]\n for l in loc_list:\n timestamp = datetime.datetime.fromtimestamp(int(l[2])).strftime(\"%d/%m/%Y %H:%M:%S\")\n print(\"{}: Location is {}, {} at {}\".format(display_name, l[0], l[1], timestamp))\n else:\n print(\"{}: Unknown action '{}'\".format(display_name, action))\n \n\n\nif(__name__ == \"__main__\"):\n print(\"Starting server...\") \n cs = ControlServer()\n ws = http_server.SimpleWebServer(cs.session_list)\n\n if(cs.listen_s is None or ws.listen_s is None):\n print(\"Exiting...\")\n sys.exit(1)\n\n print(\"Attaching webserver\")\n cs.attachWebserver(ws)\n\n print(\"Starting main loop\")\n cs.main_loop()\n\n\n\n","sub_path":"control_server/control_server.py","file_name":"control_server.py","file_ext":"py","file_size_in_byte":5651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"319770702","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 16 19:24:51 2019\n\n@author: SruthiPasumarthy\n\"\"\"\nfrom keras.preprocessing import sequence\nfrom keras.utils import np_utils\nfrom keras.layers 
import Embedding\nfrom keras.models import Sequential\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.core import Dropout, Activation, Dense\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.layers.core import Lambda\nimport keras.backend as K\nfrom keras.optimizers import Adam, RMSprop\nimport tensorflow as tf\n#import os\nimport numpy as np\nimport constants\nimport embedding\n\nimport random, sys\nimport Levenshtein\n\n\nw2v, VOCAB_SIZE, MAX_SEQ_LEN, word2index, index2word, index2index = embedding.createEmbeddingModel(constants.inputFile, constants.embeddingsMetadataFilePath, constants.embeddingsFilePath)\nprint('Word embeddings created')\nprint(\"VOCAB_SIZE: \",VOCAB_SIZE)\nprint(\"MAX_SEQ_LEN: \",MAX_SEQ_LEN)\nprint(\"word2index: \",len(word2index))\nprint(\"index2word: \",len(index2word))\nprint(\"index2index: \",len(index2index))\n\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\nprint(sess)\n\nX_train, X_test, Y_train, Y_test = embedding.splitData()\n\nCONTENT_SEQ_LEN = MAX_SEQ_LEN - constants.DESC_SEQ_LEN\n\ndef createModel(embeddings, VOCAB_SIZE, MAX_SEQ_LEN):\n embeddingLayer = Embedding(VOCAB_SIZE,\n constants.EMBED_DIMENSION,\n weights=[embeddings],\n input_length = MAX_SEQ_LEN,\n mask_zero = True,\n name = 'embeddingLayer1')\n\n model = Sequential()\n model.add(embeddingLayer)\n \n for i in range(constants.RNN_LAYERS):\n print(\"Layer \",str(i+1))\n lstm = LSTM(constants.RNN_SIZE,\n return_sequences = True,\n name = \"LSTM\"+str(i+1))\n model.add(lstm)\n model.add(Dropout(0,name=\"Dropout\"+str(i+1)))\n \n #print(\"Keras model\") \n return model\n \nclass SimpleContext(Lambda):\n \"\"\"Class to implement `simple_context` method as a Keras layer.\"\"\"\n\n def __init__(self, fn, rnn_size, **kwargs):\n \"\"\"Initialize SimpleContext.\"\"\"\n self.rnn_size = rnn_size\n super(SimpleContext, self).__init__(fn, **kwargs)\n self.supports_masking = True\n\n def compute_mask(self, input, input_mask):\n \"\"\"Compute mask of CONTENT_SEQ_LEN.\"\"\"\n return input_mask[:, CONTENT_SEQ_LEN:]\n\n def compute_output_shape(self, input_shape):\n \"\"\"Get output shape for a given `input_shape`.\"\"\"\n nb_samples = input_shape[0]\n n = 2 * (self.rnn_size - constants.ACTIVATION_RNN_SIZE)\n return (nb_samples, constants.DESC_SEQ_LEN, n) \n\ndef simple_context(X, mask, n=constants.ACTIVATION_RNN_SIZE):\n\n content, descr = X[:,:CONTENT_SEQ_LEN,:], X[:,CONTENT_SEQ_LEN:,:]\n descr_activations, descr_words = descr[:,:,:n], descr[:,:,n:]\n content_activations, content_words = content[:,:,:n], content[:,:,n:]\n \n activation_energies = K.batch_dot(descr_activations, content_activations, axes=(2, 2))\n\n activation_energies = activation_energies + -1e20 * K.expand_dims(1. 
- K.cast(mask[:, :CONTENT_SEQ_LEN], 'float32'), 1)\n\n activation_energies = K.reshape(activation_energies, (-1, CONTENT_SEQ_LEN))\n activation_weights = K.softmax(activation_energies)\n activation_weights = K.reshape(activation_weights, (-1, constants.DESC_SEQ_LEN, CONTENT_SEQ_LEN))\n\n desc_avg_word = K.batch_dot(activation_weights, content_words, axes=(2, 1))\n return K.concatenate((desc_avg_word, descr_words))\n \n\n \n\nmodel = createModel(w2v, VOCAB_SIZE, MAX_SEQ_LEN) \n \nif constants.ACTIVATION_RNN_SIZE:\n model.add(SimpleContext(simple_context, constants.RNN_SIZE, name='simple_context1'))\n\nmodel.add(TimeDistributed(Dense(\n VOCAB_SIZE,\n W_regularizer=None,\n b_regularizer=None,\n name='timedistributed_1')))\nmodel.add(Activation('softmax', name='activation_1'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer=constants.OPTIM)\n\nprint('Keras model created')\n\n\nK.set_value(model.optimizer.lr,np.float32(constants.LR))\n\ndef str_shape(x):\n return 'x'.join(map(str,x.shape))\n\ndef prt(label, x):\n print(label+':'),\n for w in x:\n print(index2word[w]),\n print\n \ndef inspect_model(model):\n for i,l in enumerate(model.layers):\n print(i, 'cls=%s name=%s'%(type(l).__name__, l.name))\n weights = l.get_weights()\n for weight in weights:\n print(str_shape(weight)),\n print\n\ninspect_model(model)\n# To write in different file\ndef lpadd(x, maxContentLength=CONTENT_SEQ_LEN, eos=constants.eos):\n \"\"\"left (pre) pad a description to CONTENT_SEQ_LEN and then add eos.\n The eos is the input to predicting the first word in the headline\n \"\"\"\n try:\n assert maxContentLength >= 0\n except:\n print(\"Assertion error in lpadd---- proceed\")\n if maxContentLength == 0:\n return [eos]\n n = len(x)\n #print('x',x)\n print(\"n: len(x)\",len(x))\n if n > maxContentLength:\n print(x[-maxContentLength:])\n x = x[-maxContentLength:]\n n = maxContentLength\n print('[empty]',[constants.empty])\n print('Updated x len ',len(x))\n print('[eos]',[eos])\n print('maxContentLength-n',maxContentLength-n)\n #print('[empty]*(maxContentLength-n) + x + [eos]',[empty]*(maxContentLength-n) + x + [eos])\n return [constants.empty]*(maxContentLength-n) + x + [eos]\n\nsamples = [lpadd([3]*26)]\n\nprint(\"len(samples)\",len(samples))\n#print(\"[3]*26\",[3]*26)\n# pad from right (post) so the first CONTENT_SEQ_LEN will be description followed by headline\ndata = sequence.pad_sequences(samples, maxlen=MAX_SEQ_LEN, value=constants.empty, padding='post', truncating='post')\n\nnp.all(data[:,CONTENT_SEQ_LEN] == constants.eos)\n\ndata.shape,map(len, samples)\nprint(data.shape)\nprobs = model.predict(data, verbose=0, batch_size=1)\nprint('probs.shape',probs.shape)\n\n# variation to https://github.com/ryankiros/skip-thoughts/blob/master/decoding/search.py\ndef beamsearch(predict, start=[constants.empty]*CONTENT_SEQ_LEN + [constants.eos],\n k=1, maxsample=MAX_SEQ_LEN, use_unk=False, empty=constants.empty, eos=constants.eos, temperature=1.0):\n \"\"\"return k samples (beams) and their NLL scores, each sample is a sequence of labels,\n all samples starts with an `empty` label and end with `eos` or truncated to length of `maxsample`.\n You need to supply `predict` which returns the label probability of each sample.\n `use_unk` allow usage of `oov` (out-of-vocabulary) label in samples\n \"\"\"\n def sample(energy, n, temperature=temperature):\n \"\"\"sample at most n elements according to their energy\"\"\"\n n = min(n,len(energy))\n prb = np.exp(-np.array(energy) / temperature )\n res = []\n for i in range(n):\n z = 
np.sum(prb)\n r = np.argmax(np.random.multinomial(1, prb/z, 1))\n res.append(r)\n prb[r] = 0. # make sure we select each element only once\n return res\n\n dead_k = 0 # samples that reached eos\n dead_samples = []\n dead_scores = []\n live_k = 1 # samples that did not yet reached eos\n live_samples = [list(start)]\n live_scores = [0]\n\n while live_k:\n # for every possible live sample calc prob for every possible label \n probs = predict(live_samples, empty=empty)\n\n # total score for every sample is sum of -log of word prb\n cand_scores = np.array(live_scores)[:,None] - np.log(probs)\n cand_scores[:,empty] = 1e20\n #if not use_unk:\n #for i in range(nb_unknown_words):\n #cand_scores[:,VOCAB_SIZE - 1 - i] = 1e20\n live_scores = list(cand_scores.flatten())\n \n\n # find the best (lowest) scores we have from all possible dead samples and\n # all live samples and all possible new words added\n scores = dead_scores + live_scores\n ranks = sample(scores, k)\n n = len(dead_scores)\n ranks_dead = [r for r in ranks if r < n]\n ranks_live = [r - n for r in ranks if r >= n]\n \n dead_scores = [dead_scores[r] for r in ranks_dead]\n dead_samples = [dead_samples[r] for r in ranks_dead]\n \n live_scores = [live_scores[r] for r in ranks_live]\n \n print(\"probs shape[1]:\",probs.shape)\n\n # append the new words to their appropriate live sample\n #voc_size = probs.shape[1]\n #live_samples = [live_samples[r//voc_size]+[r%voc_size] for r in ranks_live]\n\n # live samples that should be dead are...\n # even if len(live_samples) == maxsample we dont want it dead because we want one\n # last prediction out of it to reach a headline of DESC_SEQ_LEN\n zombie = [s[-1] == eos or len(s) > maxsample for s in live_samples]\n \n # add zombies to the dead\n dead_samples += [s for s,z in zip(live_samples,zombie) if z]\n dead_scores += [s for s,z in zip(live_scores,zombie) if z]\n dead_k = len(dead_samples)\n print(\"Len of dead samples: \",dead_k)\n # remove zombies from the living \n live_samples = [s for s,z in zip(live_samples,zombie) if not z]\n live_scores = [s for s,z in zip(live_scores,zombie) if not z]\n live_k = len(live_samples)\n print(\"Len of live samples: \",live_k)\n\n return dead_samples + live_samples, dead_scores + live_scores\n\t\ndef keras_rnn_predict(samples, empty=constants.empty, model=model, maxlen=MAX_SEQ_LEN):\n \"\"\"for every sample, calculate probability for every possible label\n you need to supply your RNN model and MAX_SEQ_LEN - the length of sequences it can handle\n \"\"\"\n sample_lengths = map(len, samples)\n #print(\"Samples: \",samples)\n try:\n assert all(l > CONTENT_SEQ_LEN for l in sample_lengths)\n except:\n print(\"Assertion 1 error in keras_rnn_predict---- proceed\")\n try:\n assert all(l[CONTENT_SEQ_LEN] == constants.eos for l in samples)\n except:\n print(\"Assertion 2 error in keras_rnn_predict ---- proceed\")\n # pad from right (post) so the first CONTENT_SEQ_LEN will be description followed by headline\n data = sequence.pad_sequences(samples, maxlen=MAX_SEQ_LEN, value=empty, padding='post', truncating='post')\n probs = model.predict(data, verbose=0, batch_size=constants.BATCH_SIZE)\n return np.array([prob[sample_length-CONTENT_SEQ_LEN-1] for prob, sample_length in zip(probs, sample_lengths)])\n\t\ndef vocab_fold(xs):\n #Sruthi: No point to use since no words outside dict..\n \"\"\"convert list of word indexes that may contain words outside VOCAB_SIZE to words inside.\n If a word is outside, try first to use glove_idx2idx to find a similar word inside.\n If none exist 
then replace all accurancies of the same unknown word with <0>, <1>, ...\n \"\"\"\n xs = [index2index.get(x,x) for x in xs] \n # the more popular word is <0> and so on\n #outside = sorted([x for x in xs if x >= oov0])\n # if there are more than nb_unknown_words oov words then put them all in nb_unknown_words-1\n #outside = dict((x,VOCAB_SIZE-1-min(i, nb_unknown_words-1)) for i, x in enumerate(outside))\n #xs = [outside.get(x,x) for x in xs]\n return xs #Sru: returns list of embedding indices\n\t\ndef vocab_unfold(c,xs):\n # assume desc is the unfolded version of the start of xs\n unfold = {}\n for i, unfold_idx in enumerate(c):\n fold_idx = xs[i]\n #if fold_idx >= oov0:\n #unfold[fold_idx] = unfold_idx\n return [unfold.get(x,x) for x in xs] #Sru: returns list of indices \n\ndef gensamples(skips=2, k=10, batch_size=constants.BATCH_SIZE, short=True, temperature=1., use_unk=False):\n i = random.randint(0,len(X_test)-1)\n #print('DESC:',' '.join(index2word[w] for w in Y_test[i][:DESC_SEQ_LEN]))\n #print('CONTENT:',' '.join(index2word[w] for w in X_test[i][:CONTENT_SEQ_LEN]))\n sys.stdout.flush()\n\n print('DESCRIPTION:')\n x = X_test[i]\n samples = []\n if CONTENT_SEQ_LEN == 0:\n skips = [0]\n else:\n skips = range(min(CONTENT_SEQ_LEN,len(x)), max(CONTENT_SEQ_LEN,len(x)), abs(CONTENT_SEQ_LEN - len(x)) // skips + 1)\n \n for s in skips: \n start = lpadd(x[:s])\n fold_start = vocab_fold(start)\n print('Length of list of foldstart: ',len(list(fold_start)))\n sample, score = beamsearch(predict=keras_rnn_predict, start=fold_start, k=k, temperature=temperature, use_unk=use_unk) #k = 10 , use_unk = False, temperature = 1.\n try:\n assert all(s[CONTENT_SEQ_LEN] == constants.eos for s in sample)\n except:\n print(\"Assertion error in gensamples---- proceed\")\n samples += [(s,start,scr) for s,scr in zip(sample,score)]\n\n samples.sort(key=lambda x: x[-1])\n codes = []\n for sample, start, score in samples:\n code = ''\n words = []\n sample = vocab_unfold(start, sample)[len(start):]\n for w in sample:\n if w == constants.eos:\n break\n words.append(index2word[w])\n code += chr(w//(256*256)) + chr((w//256)%256) + chr(w%256)\n if short:\n distance = min([100] + [-Levenshtein.jaro(code,c) for c in codes])\n if distance > -0.6:\n print(score, ' '.join(words))\n # print '%s (%.2f) %f'%(' '.join(words), score, distance)\n else:\n print(score, ' '.join(words))\n codes.append(code)\n\t\t\n\t\n\n#pd.__path__\t\n#gensamples(skips=2, batch_size=constants.BATCH_SIZE, k=10, temperature=1.)\n \ndef flip_description(x, nflips=None, model=None, debug=False):\n \"\"\"given a vectorized input (after `pad_sequences`) flip some of the words in the second half (headline)\n with words predicted by the model\n \"\"\"\n if nflips is None or model is None or nflips <= 0:\n return x\n \n BATCH_SIZE = len(x)\n try:\n assert np.all(x[:,CONTENT_SEQ_LEN] == constants.eos)\n except:\n print(\"Assertion error in flip_description---- proceed\")\n probs = model.predict(x, verbose=0, batch_size=BATCH_SIZE)\n x_out = x.copy()\n for b in range(BATCH_SIZE):\n # pick locations we want to flip\n # 0...CONTENT_SEQ_LEN-1 are descriptions and should be fixed\n # CONTENT_SEQ_LEN is eos and should be fixed\n flips = sorted(random.sample(range(CONTENT_SEQ_LEN+1,MAX_SEQ_LEN), nflips))\n if debug and b < debug:\n print(b),\n for input_idx in flips:\n if x[b,input_idx] == constants.empty or x[b,input_idx] == constants.eos:\n continue\n # convert from input location to label location\n # the output at CONTENT_SEQ_LEN (when input is eos) is feed as 
input at CONTENT_SEQ_LEN+1\n label_idx = input_idx - (CONTENT_SEQ_LEN+1)\n prob = probs[b, label_idx]\n w = prob.argmax()\n #if w == empty: # replace accidental empty with oov\n #w = oov0\n if debug and b < debug:\n print('%s => %s'%(index2word[x_out[b,input_idx]],index2word[w])),\n x_out[b,input_idx] = w\n if debug and b < debug:\n print\n return x_out\n\t\n\ndef conv_seq_labels(xds, xhs, nflips=None, model=None, debug=False):\n \"\"\"description and hedlines are converted to padded input vectors. headlines are one-hot to label\"\"\"\n BATCH_SIZE = len(xhs)\n try:\n assert len(xds) == BATCH_SIZE\n except:\n print(\"Assertion error in conv_seq_labels---- proceed\")\n x = [vocab_fold(lpadd(xd)+xh) for xd,xh in zip(xds,xhs)] # the input does not have 2nd eos\n x = sequence.pad_sequences(x, maxlen=MAX_SEQ_LEN, value=constants.empty, padding='post', truncating='post')\n x = flip_description(x, nflips=nflips, model=model, debug=debug)\n \n y = np.zeros((BATCH_SIZE, constants.DESC_SEQ_LEN, VOCAB_SIZE))\n for i, xh in enumerate(xhs):\n xh = vocab_fold(xh) + [constants.eos] + [constants.empty]*constants.DESC_SEQ_LEN # output does have a eos at end\n xh = xh[:constants.DESC_SEQ_LEN]\n y[i,:,:] = np_utils.to_categorical(xh, VOCAB_SIZE)\n \n return x, y\n\t\ndef gen(Xd, Xh, index2index, vocab_size, index2word, batch_size=constants.BATCH_SIZE, nb_batches=None, nflips=None, model=None, debug=False, seed=constants.SEED):\n \"\"\"yield batches. for training use nb_batches=None\n for validation generate deterministic results repeating every nb_batches\n \n while training it is good idea to flip once in a while the values of the headlines from the\n value taken from Xh to value generated by the model.\n \"\"\"\n c = nb_batches if nb_batches else 0\n while True:\n xds = []\n xhs = []\n if nb_batches and c >= nb_batches:\n c = 0\n new_seed = random.randint(0, sys.maxsize)\n random.seed(c+123456789+seed)\n for b in range(constants.BATCH_SIZE):\n t = random.randint(0,len(Xd)-1)\n\n xd = Xd[t]\n s = random.randint(min(CONTENT_SEQ_LEN,len(xd)), max(CONTENT_SEQ_LEN,len(xd)))\n xds.append(xd[:s])\n \n xh = Xh[t]\n s = random.randint(min(constants.DESC_SEQ_LEN,len(xh)), max(constants.DESC_SEQ_LEN,len(xh)))\n xhs.append(xh[:s])\n\n # undo the seeding before we yield inorder not to affect the caller\n c+= 1\n random.seed(new_seed)\n\n yield conv_seq_labels(xds, xhs, nflips=nflips, model=model, debug=debug)\n\t\t\n\nr = next(gen(X_train, Y_train, batch_size=constants.BATCH_SIZE, index2index=index2index, vocab_size=VOCAB_SIZE, index2word=index2word))\ntraingen = gen(X_train, Y_train, batch_size=constants.BATCH_SIZE, nb_batches=None, nflips=constants.NUM_FLIPS, model=model, debug=False, index2index=index2index, vocab_size=VOCAB_SIZE, index2word=index2word)\nvalgen = gen(X_test, Y_test, batch_size=constants.BATCH_SIZE, nb_batches=len(X_test) // constants.BATCH_SIZE, nflips=None, model=None, debug=False, index2index=index2index, vocab_size=VOCAB_SIZE, index2word=index2word)\n\n# define callbacks for training\n#callbacks = [TensorBoard(\n # log_dir=os.path.join(config.path_logs, str(time.time())),\n # histogram_freq=2, write_graph=False, write_images=False)]\n\n# train model and save weights\nh = model.fit_generator(\n traingen, samples_per_epoch=len(X_train),\n nb_epoch=30, validation_data=valgen, nb_val_samples=len(X_test)\n)\nmodel.save_weights(constants.dataPath+'TrainingWeights.hdf5', overwrite=True)\n\nprint(\"Keras model trained & weights saved in \"+constants.dataPath+\"TrainingWeights.hdf5\")\n\n# print samples 
after training\ngensamples(\n    skips=2,\n    k=10,\n    batch_size=constants.BATCH_SIZE,\n    short=False,\n    temperature=1.0,\n    use_unk=False,\n)\n\n \n \n \n \n \n","sub_path":"Summarization/summarizationModel.py","file_name":"summarizationModel.py","file_ext":"py","file_size_in_byte":18858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"454262010","text":"from PIL import Image\nimport cv2\nimport numpy\nfrom videostream import videostream\nimport zbarlight\nimport serial\nimport RPi.GPIO as GPIO\nimport time\nimport os\nimport subprocess\nfrom UARTThread import uartthread\nfrom databaseutils import *\ndef topil(img):\n    return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\ndef graytopil(img):\n    return Image.fromarray(img)\ndef uartwrite(arr):\n    uart.write(arr)\n\ndef detect():\n    st=videostream()\n    #st.getstreamer().set(3,1280)\n    #st.getstreamer().set(4,720) \n    st.start()\n    t=uart.getnoqrtimes()\n    for i in t:\n        db.writestamp(\" \",str(i))\n    try:\n        while True:\n            if(GPIO.input(switch)==GPIO.LOW):\n                st.stop()\n                raise KeyboardInterrupt\n            t1 = cv2.getTickCount()\n            ret, frame = st.read()\n            if (not frame is None):\n                #cv2.imshow('code', frame)\n                code= zbarlight.scan_codes('qrcode', topil(frame))\n\n                if code is not None:\n                    code=str(code)\n                    code=code[3:len(code)-4]\n                    print(code)\n                    codetogo='{'+code+'}'\n                    uart.write((\">\"+codetogo).encode())\n                    db.writestamp(code)\n                    print('Done!')\n                    break\n            key = cv2.waitKey(1) & 0xFF\n            if key == ord(\"q\"):\n                break\n    except KeyboardInterrupt:\n        st.stop()\n    st.stop()\n    cv2.destroyAllWindows()\nswitch=37\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(switch,GPIO.IN)\n\nprint('started')\nuart=uartthread()\nuart.start()\ndb=database()\nwhile True:\n    if(GPIO.input(switch)==GPIO.HIGH):\n        detect()\n","sub_path":"RaspberryPiQRScan/QRPython.py","file_name":"QRPython.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"620911126","text":"import random\nfrom random import choice # imports just one name from the library\n\nbaby=[\"who are you?\",\"how old are you?\",\"where are you?\"]\nanswer=(\"a\")\nwhile answer != \"dont know\":\n    \n    #####A=random.randint(0,2) # an approach I tried myself\n    A= choice(baby)\n    answer = input(A).lower()\n\nprint(\"okay\")\n    \n","sub_path":"10Baby project.py","file_name":"10Baby project.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"19086818","text":"# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# [START app]\nimport datetime\nimport logging\nimport os\nimport json\n\nfrom flask import Flask, render_template, request, Response, json\nfrom flask_cors import CORS\nimport sqlalchemy\n\n# db_user = os.environ.get(\"DB_USER\")\n# db_pass = 
os.environ.get(\"DB_PASS\")\n# db_name = os.environ.get(\"DB_NAME\")\n# cloud_sql_connection_name = os.environ.get(\"CLOUD_SQL_CONNECTION_NAME\")\ncloud_sql_connection_name='constant-012920:us-central1:cs348demo-db'\n\n\napp = Flask(__name__)\nCORS(app)\n\nlogger = logging.getLogger()\naddedCourse = []\n# [START cloud_sql_mysql_sqlalchemy_create]\n# The SQLAlchemy engine will help manage interactions, including automatically\n# managing a pool of connections to your database\ndb = sqlalchemy.create_engine(\n    # Equivalent URL:\n    # mysql+pymysql://<db_user>:<db_pass>@/<db_name>?unix_socket=/cloudsql/<cloud_sql_connection_name>\n    sqlalchemy.engine.url.URL(\n        drivername=\"mysql+pymysql\",\n        username='root',\n        password='password',\n        database='random',\n        query={\"unix_socket\": \"/cloudsql/{}\".format(cloud_sql_connection_name)},\n    ),\n    # ... Specify additional properties here.\n    # [START_EXCLUDE]\n    # [START cloud_sql_mysql_sqlalchemy_limit]\n    # Pool size is the maximum number of permanent connections to keep.\n    pool_size=5,\n    # Temporarily exceeds the set pool_size if no connections are available.\n    max_overflow=2,\n    pool_timeout=30, # 30 seconds\n    pool_recycle=1800, # 30 minutes\n    # [END cloud_sql_mysql_sqlalchemy_lifetime]\n    # [END_EXCLUDE]\n)\n\n#View All Course\n@app.route('/', methods=['GET','POST'])\ndef get_companies():\n    error = None \n    #select = request.form.get('comp_select') \n    with db.connect() as conn:\n        stmt = sqlalchemy.text(\n            \"Select CONCAT(Program,CN) as coursename,section,time,location,instructor,rate,number_of_ratings from(Select *from enroll as T1 Left Outer join instructor as I1 ON T1.instructor = I1.name Left Outer join course as C1 ON T1.key_number = C1.key_num) AS K order by Program,CN,SUBSTRING(section, 1, 3),SUBSTRING(section, 5, 5);\"\n        )\n        #data = conn.execute(stmt)\n        data = conn.execute(stmt).fetchall() \n        #convert sql to json\n        classes_as_dict = []\n        #print(data)\n        for class_data in data:\n            class_as_dict = {\n                'coursename': class_data[0],\n                'section': class_data[1],\n                'time': class_data[2],\n                'location': class_data[3],\n                'instructor': class_data[4],\n                'rate': class_data[5],\n                'number_of_ratings':class_data[6],\n            }\n            classes_as_dict.append(class_as_dict)\n        return json.dumps(classes_as_dict)\n\n#Select by Course Name\n@app.route('/course/<course_name>', methods=['GET'])\ndef get_courses(course_name):\n    error = None \n    lst = course_name.split(\",\") \n    group = lst[0]\n    sec_id = lst[1]\n    #select = request.form.get('comp_select') \n    with db.connect() as conn:\n        stmt = 
sqlalchemy.text(\n            \"Select CONCAT(Program,CN) as coursename,section,time,location,instructor,rate,number_of_ratings from(Select *from enroll as T1 Left Outer join instructor as I1 ON T1.instructor = I1.name Left Outer join course as C1 ON T1.key_number = C1.key_num) AS K where Program =:Program and CN =:CN order by Program,CN,SUBSTRING(section, 1, 3),SUBSTRING(section, 5, 5);\"\n        )\n        #data = conn.execute(stmt)\n        data = conn.execute(stmt,Program = group,CN =sec_id).fetchall() \n        #convert sql to json\n        classes_as_dict = []\n        print(data)\n        for class_data in data:\n            class_as_dict = {\n                'coursename': class_data[0],\n                'section': class_data[1],\n                'time': class_data[2],\n                'location': class_data[3],\n                'instructor': class_data[4],\n                'rate': class_data[5],\n                'number_of_ratings':class_data[6],\n            }\n            classes_as_dict.append(class_as_dict)\n        return json.dumps(classes_as_dict)\n\n#Select by Program Name\n\n@app.route('/courseprogram/<program>', methods=['GET'])\ndef get_program(program):\n    error = None \n    #select = request.form.get('comp_select') \n    with db.connect() as conn:\n        stmt = sqlalchemy.text(\n            \"Select CONCAT(Program,CN) as coursename,section,time,location,instructor,rate,number_of_ratings from(Select *from enroll as T1 Left Outer join instructor as I1 ON T1.instructor = I1.name Left Outer join course as C1 ON T1.key_number = C1.key_num) AS K where Program =:Program order by Program,CN,SUBSTRING(section, 1, 3),SUBSTRING(section, 5, 5);\"\n        )\n        #data = conn.execute(stmt)\n        data = conn.execute(stmt,Program = program).fetchall() \n        #convert sql to json\n        classes_as_dict = []\n        #print(data)\n        for class_data in data:\n            class_as_dict = {\n                'coursename': class_data[0],\n                'section': class_data[1],\n                'time': class_data[2],\n                'location': class_data[3],\n                'instructor': class_data[4],\n                'rate': class_data[5],\n                'number_of_ratings':class_data[6],\n            }\n            classes_as_dict.append(class_as_dict)\n        return json.dumps(classes_as_dict)\n\n#Sort by Rate\n\n@app.route('/course/sort/program=<program>&section=<sec_value>', methods=['GET'])\ndef get_sort_courses(program,sec_value):\n    error = None \n    group = program\n    sec_id = sec_value\n    #select = request.form.get('comp_select') \n    with db.connect() as conn:\n        stmt = sqlalchemy.text(\n            \"Select CONCAT(Program,CN) as coursename,section,time,location,instructor,rate,number_of_ratings from(Select *from enroll as T1 Left Outer join instructor as I1 ON T1.instructor = I1.name Left Outer join course as C1 ON T1.key_number = C1.key_num) AS K where Program=:Program and CN =:CN order by Program,CN,SUBSTRING(section, 1, 3),rate DESC;\"\n        )\n        #data = conn.execute(stmt)\n        data = conn.execute(stmt,Program=group,CN =sec_id).fetchall() \n        #convert sql to json\n        classes_as_dict = []\n        # print(sec_id)\n        for class_data in data:\n            class_as_dict = {\n                'coursename': class_data[0],\n                'section': class_data[1],\n                'time': class_data[2],\n                'location': class_data[3],\n                'instructor': class_data[4],\n                'rate': class_data[5],\n                'number_of_ratings':class_data[6],\n            }\n            classes_as_dict.append(class_as_dict)\n        # print(json.dumps(classes_as_dict))\n        return json.dumps(classes_as_dict)\n\naddedCourse = []\n\n#Login\n\n@app.route('/login', methods=['GET','POST'])\ndef login():\n    username = request.args.get('username')\n    password = request.args.get('password')\n    classes_as_dict = []\n    with db.connect() as conn:\n        stmt = sqlalchemy.text(\"Select * from student;\")\n        data = conn.execute(stmt).fetchall() \n        for class_data in data:\n            if class_data[0] == username and class_data[1] == password:\n                classes_as_dict.append(True)\n                return json.dumps(True)\n        classes_as_dict.append(False)\n        return json.dumps(False)\n\n#Add Course\n\n@app.route('/addCourse/<username>', methods=['GET','POST'])\ndef addCourse(username):\n    arr = request.data.decode(\"utf-8\")\n    arr = json.loads(arr)\n    print(arr)\n    with db.connect() as conn:\n        stmt = sqlalchemy.text(\n            \"INSERT INTO shoppingchart (user, coursename, section, time,location,instructor,rate,number_of_ratings)\" \"VALUES (:user, :coursename, :section, :time,:location,:instructor,:rate,:number_of_ratings)\")\n        for class_data in arr:\n            conn.execute(stmt,user=username,coursename=class_data['coursename'],section=class_data['section'],time=class_data['time'],location=class_data['location'],instructor=class_data['instructor'],rate=class_data['rate'],number_of_ratings=class_data['number_of_ratings'])\n    return arr\n\n#Delete Course\n\n@app.route('/deleteCourse/<username>', methods=['GET','POST'])\ndef deleteCourse(username):\n    arr = request.data.decode(\"utf-8\")\n    arr = json.loads(arr)\n    print(arr)\n    with db.connect() as conn:\n        stmt = sqlalchemy.text(\n            \"DELETE FROM shoppingchart WHERE user=:user 
and coursename =:coursename and section=:section and time=:time and location=:location\")\n        for class_data in arr:\n            conn.execute(stmt,user=username,coursename=class_data['coursename'],section=class_data['section'],time=class_data['time'],location=class_data['location'])\n    return arr\n\n#Edit Rate\n\n@app.route('/editRate', methods=['GET','POST'])\ndef editRate():\n    arr = request.data.decode(\"utf-8\")\n    arr = json.loads(arr)\n    arr_list = arr[0]['coursename'].split()\n    with db.connect() as conn:\n        stmt = sqlalchemy.text(\n            \"UPDATE instructor SET rate = (((rate*number_of_ratings)+:rate)/(number_of_ratings +1)),number_of_ratings = number_of_ratings+1 WHERE name=:instructor;\")\n        for class_data in arr:\n            conn.execute(stmt,rate=class_data['rate'],instructor=class_data['instructor']) \n        stmt = sqlalchemy.text(\n            \"UPDATE shoppingchart SET rate = (((rate*number_of_ratings)+:rate)/(number_of_ratings +1)),number_of_ratings = number_of_ratings+1 WHERE instructor=:instructor;\")\n        for class_data in arr:\n            conn.execute(stmt,rate=class_data['rate'],instructor=class_data['instructor'])\n\n        stmt1 = sqlalchemy.text(\n            \"Select CONCAT(Program,CN) as coursename,section,time,location,instructor,rate,number_of_ratings from(Select *from enroll as T1 Left Outer join instructor as I1 ON T1.instructor = I1.name Left Outer join course as C1 ON T1.key_number = C1.key_num) AS K WHERE Program =:Program and CN=:CN and section=:section and time=:time and location=:location;\")\n        data = conn.execute(stmt1,Program=arr_list[0],CN = arr_list[1],section=class_data['section'],time=class_data['time'],location=class_data['location']).fetchall()\n        #convert sql to json\n        classes_as_dict = []\n        # print(sec_id)\n        for class_data in data:\n            class_as_dict = {\n                'coursename': class_data[0],\n                'section': class_data[1],\n                'time': class_data[2],\n                'location': class_data[3],\n                'instructor': class_data[4],\n                'rate': class_data[5],\n                'number_of_ratings':class_data[6],\n            }\n            classes_as_dict.append(class_as_dict)\n        print(json.dumps(classes_as_dict))\n        return json.dumps(classes_as_dict)\n\n#View ShoppingChart\n\n@app.route('/Courseenroll/<username>', methods=['GET'])\ndef get_selected_courses(username):\n    error = None \n    #select = request.form.get('comp_select') \n    with db.connect() as conn:\n        stmt = sqlalchemy.text(\n            \"Select * from shoppingchart where user=:username;\"\n        )\n        #data = conn.execute(stmt)\n        data = conn.execute(stmt,username =username).fetchall() \n        #convert sql to json\n        classes_as_dict = []\n        for class_data in data:\n            class_as_dict = {\n                'coursename': class_data[1],\n                'section': class_data[2],\n                'time': class_data[3],\n                'location': class_data[4],\n                'instructor': class_data[5],\n                'rate': class_data[6],\n                'number_of_ratings':class_data[7],\n            }\n            classes_as_dict.append(class_as_dict)\n        # print(json.dumps(classes_as_dict))\n        return json.dumps(classes_as_dict)\n\n\n\n@app.route('/register', methods=['GET','POST'])\ndef register():\n    username = request.args.get('username')\n    password = request.args.get('password')\n    print(username)\n    with db.connect() as conn:\n        stmt = sqlalchemy.text(\n            \"INSERT INTO student (username, password)\" \"VALUES (:username, :password)\")\n        conn.execute(stmt,username=username,password=password)\n    return json.dumps(True)\n\n\nif __name__=='__main__': \n    app.run(debug=True)\n\n@app.errorhandler(500)\ndef server_error(e):\n    # Log the error and stacktrace.\n    logging.exception('An error occurred during a request.')\n    return 'An internal error occurred.', 
500\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"119555371","text":"#!/usr/bin/env python\n\nimport sys\nfrom spider.scraper import crawl\nfrom spider.compress import HuffmanTree\n\n\nif __name__ == '__main__':\n # Handle the program arguments\n start_url = sys.argv[1]\n limit = 1\n if len(sys.argv) == 3:\n limit = int(sys.argv[2])\n\n try:\n # Crawl n-links through the internet and give back the aggregated text\n text = crawl(start_url, limit)\n\n # Show word frequency of the results\n words = text.word_frequency()\n print(\"\\n{} words found\".format(len(words)))\n for word in sorted(words, key=words.__getitem__):\n print(\" + {}: {}\".format(word, words[word]))\n\n # Compress the normalized text using a huffman tree\n ht = HuffmanTree(text.normalized())\n\n # Print out the huffman codes\n print(\"\\n{}\\t{}\\t{}\".format(\"Letter\", \"Weight\", \"Huffman code\"))\n codes = ht.codes()\n for ch in sorted(codes, key=lambda c: len(codes[c])):\n print(\"{}\\t{}\\t{}\".format(ch, ht.freq[ch], codes[ch]))\n except Exception as e:\n print(\"ERROR: \" + str(e))\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"34075373","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n#Fazer um argumento na linha de comando. ex: -h = help -v = verbose\n\ntry:\n from time import time, sleep\n import socket\n from IPy import IP\n from colorama import Fore, Style\n import os\n import traceback\n import argparse\n vm_ex, vd_ex, cy_ex, a_ex, c_end = Fore.LIGHTRED_EX, Fore.LIGHTGREEN_EX, Fore.LIGHTCYAN_EX, Fore.LIGHTYELLOW_EX, Style.RESET_ALL\nexcept KeyboardInterrupt:\n print(f'\\n\\t{a_ex}Keyboard Interrupt... Bye!{c_end}\\n')\n raise SystemExit\nexcept:\n print(f'{a_ex}[!]{c_end} {a_ex}Failed to import the dependencies...{c_end} ' +\\\n f'{a_ex}Make sure to install all of the requirements{c_end} ' +\\\n f'{a_ex}and try again.{c_end}')\n raise SystemExit\n\n\nos.system('clear') #Limpa o bash\n\n\nprint(f'''\n{vm_ex}\ngithub.com/RodricBr\n _____ _ _ _ _ \n| __ \\ | | | | | | | | \n| |__) |__ _ __| |_| |__| | __ ___ _| | __\n| ___/ _ \\| '__| __| __ |/ _` \\ \\ /\\ / / |/ /\n| | | (_) | | | |_| | | | (_| |\\ V V /| < \n|_| \\___/|_| \\__|_| |_|\\__,_| \\_/\\_/ |_|\\_\\ {vd_ex}v3.0\\n\\t~#By: RodricBr\\n\\tInspiration by: Aleksa Tamburkovski{c_end}\n{c_end}\n''')\n\ndef scan(target):\n try:\n converted_ip = check_ip(target)\n # print(f'\\n{a_ex}[!] Recommended port range:\\n{c_end}1 ──› 1023 {a_ex}(May take a few minutes!){c_end}\\n')\n p_start = int(input(f'{cy_ex}[P¹] Insert starting port range number:\\n>>>{c_end} '))\n p_end = int(input(f'{cy_ex}[P²] Insert final port range number:\\n>>>{c_end} '))\n print(f'{cy_ex}[=] Scanning Ports {p_start} to {p_end} at ──› {c_end}' + str(target))\n except KeyboardInterrupt:\n print(f'\\n\\t{a_ex}Keyboard Interrupt... 
Bye!{c_end}\\n')\n\n for port in range(p_start, p_end + 1): #Mais comum: 80, 443, 21, 22, 110, 995, 143, 993, 25/26, 587, 3306, 2082, 2083, 2086, 3306\n scan_port(converted_ip, port)\n\ndef check_ip(ip):\n try:\n IP(ip)\n return(ip)\n except ValueError:\n return socket.gethostbyname(ip)\n\ndef get_banner(s):\n return s.recv(1024)\n\ntry:\n set_timeout = float(input(f'{cy_ex}[+] Insert timeout number{c_end} {a_ex}(Float nº recommended: 0.5){c_end}{cy_ex}:\\n>>> {c_end}'))\n print(f'{vd_ex}[T] Timeout number set ──› {c_end}{str(set_timeout)}')\nexcept KeyboardInterrupt:\n print(f'\\n\\t{a_ex}Keyboard Interrupt... Bye!{c_end}\\n')\n exit()\n\ndef scan_port(ipaddress, port):\n try:\n sock = socket.socket()\n sock.settimeout(set_timeout) # Timeout rapido: 0.5, normal: 2, tedioso: 10\n sock.connect((ipaddress, port))\n try:\n banner = get_banner(sock)\n try:\n print(f'{vd_ex}[+] Port {c_end}' + str(port) + f' {vd_ex}/tcp is open. Banner ──› {c_end}' + str(banner.decode().strip('\\n'))) #Banner\n except KeyboardInterrupt:\n print(f'\\n\\t{a_ex}Keyboard Interrupt... Bye!{c_end}\\n')\n except:\n try:\n print(f'{vd_ex}[+] Port {c_end}' + str(port) + f' {vd_ex}is open. Banner ──› {c_end}not found')\n except KeyboardInterrupt:\n print(f'\\n\\t{a_ex}Keyboard Interrupt... Bye!{c_end}\\n')\n except:\n pass\n # print(f'{vm_ex}[-] Port {c_end}' + str(port) + f' {vm_ex}is closed{c_end}') Mostrar portas fechadas\n\ntry:\n targets = input(f'{cy_ex}[+] Insert the Target/s{c_end} {a_ex}(Split Targets using \",\"){c_end}{cy_ex}:\\n>>> {c_end}')\nexcept KeyboardInterrupt:\n print(f'\\n\\t{a_ex}Keyboard Interrupt... Bye!{c_end}\\n')\n exit()\n\nif ',' in targets:\n for ip_addr in targets.split(','): # Dividindo(tirando a vírgula e dividindo as duas strings)\n scan(ip_addr.strip(' ')) # Tirando(tirando o espaço)\nelse:\n scan(targets)\n","sub_path":"porthawkv3.py","file_name":"porthawkv3.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"337502443","text":"# 入力フォームの設定\nfrom django import forms\nfrom .models import Information\nfrom django.contrib.auth.forms import AuthenticationForm\n\n\n# 情報フォーム\nclass InformationForm(forms.ModelForm):\n class Meta:\n model = Information\n fields = ('name', 'email', 'age', 'sex', 'memo')\n widgets = {\n 'name': forms.TextInput(attrs={'placeholder': '記入例:山田 太郎'}),\n 'email': forms.TextInput(attrs={'placeholder': '記入例:mail@mail.jp'}),\n 'age': forms.NumberInput(attrs={'min': 1}),\n 'sex': forms.RadioSelect(),\n 'memo': forms.Textarea(attrs={'rows': 4}),\n }\n\n\n# ログインフォーム\nclass LoginForm(AuthenticationForm):\n def __init__(self, *args, **kwargs):\n for field in self.fields.values():\n field.widget.attrs['class'] = 'form-control'\n field.widget.attrs['placeholder'] = field.label # placeholderにフィールドのラベルを入れる\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"584866228","text":"import signal\nimport asyncio\nimport threading\nfrom typing import Set, Union, Callable, Awaitable, cast\n\nfrom nonebot.log import logger\nfrom nonebot.drivers import Driver\nfrom nonebot.typing import overrides\nfrom nonebot.config import Env, Config\nfrom nonebot.utils import run_sync, is_coroutine_callable\n\nHOOK_FUNC = Union[Callable[[], None], Callable[[], Awaitable[None]]]\nHANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. 
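# A minimal sketch of the Windows fallback used further down in this module:
# when loop.add_signal_handler raises NotImplementedError, the handlers are
# registered through the plain signal module instead. The handler body here
# is illustrative.
import signal

def handle_exit(sig, frame):
    print(f"received signal {sig}, shutting down")

for s in (signal.SIGINT, signal.SIGTERM):
    signal.signal(s, handle_exit)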
Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill `.\n)\n\n\nclass BlockDriver(Driver):\n def __init__(self, env: Env, config: Config):\n super().__init__(env, config)\n self.startup_funcs: Set[HOOK_FUNC] = set()\n self.shutdown_funcs: Set[HOOK_FUNC] = set()\n self.should_exit: asyncio.Event = asyncio.Event()\n self.force_exit: bool = False\n\n @property\n @overrides(Driver)\n def type(self) -> str:\n \"\"\"驱动名称: `block_driver`\"\"\"\n return \"block_driver\"\n\n @property\n @overrides(Driver)\n def logger(self):\n \"\"\"block driver 使用的 logger\"\"\"\n return logger\n\n @overrides(Driver)\n def on_startup(self, func: HOOK_FUNC) -> HOOK_FUNC:\n \"\"\"\n 注册一个启动时执行的函数\n \"\"\"\n self.startup_funcs.add(func)\n return func\n\n @overrides(Driver)\n def on_shutdown(self, func: HOOK_FUNC) -> HOOK_FUNC:\n \"\"\"\n 注册一个停止时执行的函数\n \"\"\"\n self.shutdown_funcs.add(func)\n return func\n\n @overrides(Driver)\n def run(self, *args, **kwargs):\n \"\"\"启动 block driver\"\"\"\n super().run(*args, **kwargs)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self.serve())\n\n async def serve(self):\n self.install_signal_handlers()\n await self.startup()\n if self.should_exit.is_set():\n return\n await self.main_loop()\n await self.shutdown()\n\n async def startup(self):\n # run startup\n cors = [\n cast(Callable[..., Awaitable[None]], startup)()\n if is_coroutine_callable(startup)\n else run_sync(startup)()\n for startup in self.startup_funcs\n ]\n if cors:\n try:\n await asyncio.gather(*cors)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"Error when running startup function. \"\n \"Ignored!\"\n )\n\n logger.info(\"Application startup completed.\")\n\n async def main_loop(self):\n await self.should_exit.wait()\n\n async def shutdown(self):\n logger.info(\"Shutting down\")\n\n logger.info(\"Waiting for application shutdown.\")\n # run shutdown\n cors = [\n cast(Callable[..., Awaitable[None]], shutdown)()\n if is_coroutine_callable(shutdown)\n else run_sync(shutdown)()\n for shutdown in self.shutdown_funcs\n ]\n if cors:\n try:\n await asyncio.gather(*cors)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"Error when running shutdown function. \"\n \"Ignored!\"\n )\n\n for task in asyncio.all_tasks():\n if task is not asyncio.current_task() and not task.done():\n task.cancel()\n await asyncio.sleep(0.1)\n\n tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]\n if tasks and not self.force_exit:\n logger.info(\"Waiting for tasks to finish. 
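# A standalone sketch of the cancel-then-gather pattern shutdown() uses
# around this point: cancel everything except the current task, then await
# the lot with return_exceptions=True so CancelledError is swallowed.
# Standard-library asyncio only (Python 3.7+).
import asyncio

async def cancel_pending():
    tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    for t in tasks:
        t.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)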
(CTRL+C to force quit)\")\n while tasks and not self.force_exit:\n await asyncio.sleep(0.1)\n tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]\n\n for task in tasks:\n task.cancel()\n\n await asyncio.gather(*tasks, return_exceptions=True)\n\n logger.info(\"Application shutdown complete.\")\n loop = asyncio.get_event_loop()\n loop.stop()\n\n def install_signal_handlers(self) -> None:\n if threading.current_thread() is not threading.main_thread():\n # Signals can only be listened to from the main thread.\n return\n\n loop = asyncio.get_event_loop()\n\n try:\n for sig in HANDLED_SIGNALS:\n loop.add_signal_handler(sig, self.handle_exit, sig, None)\n except NotImplementedError:\n # Windows\n for sig in HANDLED_SIGNALS:\n signal.signal(sig, self.handle_exit)\n\n def handle_exit(self, sig, frame):\n if self.should_exit.is_set():\n self.force_exit = True\n else:\n self.should_exit.set()\n","sub_path":"nonebot/drivers/_block_driver.py","file_name":"_block_driver.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"349122088","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom config import VIDEO_ID, USER_ID, NUM_WINNERS\nfrom helper import get_commenters, get_followers, draw\n\nif __name__ == '__main__':\n with open(\"cookies.txt\", \"r\") as f:\n raw = f.read().strip()\n cookies = dict(cookies_are=raw)\n commenters = get_commenters(VIDEO_ID)\n del commenters[USER_ID]\n followers = get_followers(USER_ID, cookies)\n pool = set(commenters.keys()) & set(followers.keys())\n winners = draw(pool, NUM_WINNERS)\n print(\"*************************************\")\n print(\"抽奖结果: \")\n for i in range(len(winners)):\n print(\"第{}位获奖观众: {} - https://space.bilibili.com/{}\".format(i +\n 1, followers[winners[i]], winners[i]))\n","sub_path":"FS/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"162199310","text":"import unittest\nfrom django.test import Client\nfrom rest_framework import status\nfrom facebook.models import FacebookUser\nfrom facebook.serializers import FacebookSerializer\n\n# Create your tests here.\nclass FacebookUserTestCase(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.Client = Client()\n\t\tself.Client.post('/facebook/person/', {'facebookId' : '100000009410619'})\n\n\tdef test_post(self):\n\t\tdata = {'facebookId':'100000362029587', 'username':u'haroldotinoco', 'name':u'Haroldo Muylaert Tinoco', 'gender':u'male'}\n\t\tresponse = self.Client.post('/facebook/person/', {'facebookId' : '100000362029587'})\n\t\tserializa = FacebookSerializer(response.data)\n\t\t#Checa se postou\n\t\tself.assertEqual(serializa.data, data)\n\t\t#Checa response\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n\tdef test_list(self):\n\t\tresponse = self.Client.get('/facebook/person/')\n\t\t#Checa response\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\tdef test_list_limit(self):\n\t\tresponse = self.Client.get('/facebook/person/', {'limit' : '1'})\n\t\t#Checa response\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\t#Checa se quantidade retornada\n\t\tself.assertEqual(len(response.data), 1)\n\n\tdef test_delete(self):\n\t\tresponse = self.Client.delete('/facebook/person/100000009410619')\n\t\t#Checa response\n\t\tself.assertEqual(response.status_code, 
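# rest_framework.status simply names the numeric HTTP codes, so the
# assertion being completed here compares against 204. A quick illustration
# (constants as documented by DRF):
from rest_framework import status

assert status.HTTP_200_OK == 200
assert status.HTTP_201_CREATED == 201
assert status.HTTP_204_NO_CONTENT == 204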
status.HTTP_204_NO_CONTENT)","sub_path":"facebook/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"318237979","text":"#!/usr/bin/env python\n\"\"\"\n_MonteCarloFromGEN_t_\n\nUnit tests for the MonteCarloFromGEN workflow.\n\"\"\"\n\nimport unittest\nimport os\n\nfrom WMCore.Database.CMSCouch import CouchServer, Document\nfrom WMCore.WMBS.Fileset import Fileset\nfrom WMCore.WMBS.Subscription import Subscription\nfrom WMCore.WMBS.Workflow import Workflow\nfrom WMCore.WMSpec.StdSpecs.MonteCarloFromGEN import MonteCarloFromGENWorkloadFactory\nfrom WMCore.WorkQueue.WMBSHelper import WMBSHelper\n\nfrom WMQuality.TestInitCouchApp import TestInitCouchApp\n\n\nclass MonteCarloFromGENTest(unittest.TestCase):\n def setUp(self):\n \"\"\"\n _setUp_\n\n Initialize the database.\n \"\"\"\n self.testInit = TestInitCouchApp(__file__)\n self.testInit.setLogging()\n self.testInit.setDatabaseConnection()\n self.testInit.setupCouch(\"mclhe_t\", \"ConfigCache\")\n self.testInit.setSchema(customModules=[\"WMCore.WMBS\"],\n useDefault=False)\n\n couchServer = CouchServer(os.environ[\"COUCHURL\"])\n self.configDatabase = couchServer.connectDatabase(\"mclhe_t\")\n self.testDir = self.testInit.generateWorkDir()\n return\n\n def tearDown(self):\n \"\"\"\n _tearDown_\n\n Clear out the database.\n \"\"\"\n self.testInit.clearDatabase()\n self.testInit.tearDownCouch()\n self.testInit.delWorkDir()\n return\n\n def injectConfig(self):\n \"\"\"\n _injectConfig_\n\n Create a bogus config cache document and inject it into couch. Return\n the ID of the document.\n \"\"\"\n newConfig = Document()\n newConfig[\"info\"] = None\n newConfig[\"config\"] = None\n newConfig[\"md5hash\"] = \"eb1c38cf50e14cf9fc31278a5c8e580f\"\n newConfig[\"pset_hash\"] = \"7c856ad35f9f544839d8525ca10259a7\"\n newConfig[\"owner\"] = {\"group\": \"cmsdataops\", \"user\": \"sfoulkes\"}\n newConfig[\"pset_tweak_details\"] = {\"process\": {\"outputModules_\": [\"outputRECORECO\", \"outputALCARECOALCARECO\"],\n \"outputRECORECO\": {\"dataset\": {\"filterName\": \"FilterRECO\",\n \"dataTier\": \"RECO\"}},\n \"outputALCARECOALCARECO\": {\n \"dataset\": {\"filterName\": \"FilterALCARECO\",\n \"dataTier\": \"ALCARECO\"}}}}\n result = self.configDatabase.commitOne(newConfig)\n return result[0][\"id\"]\n\n def testMonteCarloFromGEN(self):\n \"\"\"\n _testMonteCarloFromGEN_\n\n Create a MonteCarloFromGEN workflow and verify it installs into WMBS\n correctly.\n \"\"\"\n arguments = MonteCarloFromGENWorkloadFactory.getTestArguments()\n arguments[\"ConfigCacheID\"] = self.injectConfig()\n arguments[\"CouchDBName\"] = \"mclhe_t\"\n arguments[\"PrimaryDataset\"] = \"WaitThisIsNotMinimumBias\"\n\n factory = MonteCarloFromGENWorkloadFactory()\n testWorkload = factory.factoryWorkloadConstruction(\"TestWorkload\", arguments)\n\n outputDatasets = testWorkload.listOutputDatasets()\n self.assertEqual(len(outputDatasets), 2)\n self.assertTrue(\"/WaitThisIsNotMinimumBias/FAKE-FilterRECO-FAKE-v1/RECO\" in outputDatasets)\n self.assertTrue(\"/WaitThisIsNotMinimumBias/FAKE-FilterALCARECO-FAKE-v1/ALCARECO\" in outputDatasets)\n\n productionTask = testWorkload.getTaskByPath('/TestWorkload/MonteCarloFromGEN')\n splitting = productionTask.jobSplittingParameters()\n self.assertFalse(splitting[\"deterministicPileup\"])\n\n testWMBSHelper = WMBSHelper(testWorkload, \"MonteCarloFromGEN\", \"SomeBlock\", cachepath=self.testDir)\n 
testWMBSHelper.createTopLevelFileset()\n testWMBSHelper._createSubscriptionsInWMBS(testWMBSHelper.topLevelTask, testWMBSHelper.topLevelFileset)\n\n procWorkflow = Workflow(name=\"TestWorkload\",\n task=\"/TestWorkload/MonteCarloFromGEN\")\n procWorkflow.load()\n\n self.assertEqual(len(procWorkflow.outputMap.keys()), 3,\n \"Error: Wrong number of WF outputs.\")\n self.assertEqual(procWorkflow.wfType, 'production')\n\n goldenOutputMods = [\"outputRECORECO\", \"outputALCARECOALCARECO\"]\n for goldenOutputMod in goldenOutputMods:\n mergedOutput = procWorkflow.outputMap[goldenOutputMod][0][\"merged_output_fileset\"]\n unmergedOutput = procWorkflow.outputMap[goldenOutputMod][0][\"output_fileset\"]\n\n mergedOutput.loadData()\n unmergedOutput.loadData()\n\n self.assertEqual(mergedOutput.name,\n \"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMerge%s/merged-Merged\" % goldenOutputMod,\n \"Error: Merged output fileset is wrong: %s\" % mergedOutput.name)\n self.assertEqual(unmergedOutput.name, \"/TestWorkload/MonteCarloFromGEN/unmerged-%s\" % goldenOutputMod,\n \"Error: Unmerged output fileset is wrong.\")\n\n logArchOutput = procWorkflow.outputMap[\"logArchive\"][0][\"merged_output_fileset\"]\n unmergedLogArchOutput = procWorkflow.outputMap[\"logArchive\"][0][\"output_fileset\"]\n logArchOutput.loadData()\n unmergedLogArchOutput.loadData()\n\n self.assertEqual(logArchOutput.name, \"/TestWorkload/MonteCarloFromGEN/unmerged-logArchive\",\n \"Error: LogArchive output fileset is wrong.\")\n self.assertEqual(unmergedLogArchOutput.name, \"/TestWorkload/MonteCarloFromGEN/unmerged-logArchive\",\n \"Error: LogArchive output fileset is wrong.\")\n\n for goldenOutputMod in goldenOutputMods:\n mergeWorkflow = Workflow(name=\"TestWorkload\",\n task=\"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMerge%s\" % goldenOutputMod)\n mergeWorkflow.load()\n\n self.assertEqual(len(mergeWorkflow.outputMap.keys()), 2,\n \"Error: Wrong number of WF outputs.\")\n\n mergedMergeOutput = mergeWorkflow.outputMap[\"Merged\"][0][\"merged_output_fileset\"]\n unmergedMergeOutput = mergeWorkflow.outputMap[\"Merged\"][0][\"output_fileset\"]\n\n mergedMergeOutput.loadData()\n unmergedMergeOutput.loadData()\n\n self.assertEqual(mergedMergeOutput.name,\n \"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMerge%s/merged-Merged\" % goldenOutputMod,\n \"Error: Merged output fileset is wrong.\")\n self.assertEqual(unmergedMergeOutput.name,\n \"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMerge%s/merged-Merged\" % goldenOutputMod,\n \"Error: Unmerged output fileset is wrong.\")\n\n logArchOutput = mergeWorkflow.outputMap[\"logArchive\"][0][\"merged_output_fileset\"]\n unmergedLogArchOutput = mergeWorkflow.outputMap[\"logArchive\"][0][\"output_fileset\"]\n logArchOutput.loadData()\n unmergedLogArchOutput.loadData()\n\n self.assertEqual(logArchOutput.name,\n \"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMerge%s/merged-logArchive\" % goldenOutputMod,\n \"Error: LogArchive output fileset is wrong: %s\" % logArchOutput.name)\n self.assertEqual(unmergedLogArchOutput.name,\n \"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMerge%s/merged-logArchive\" % goldenOutputMod,\n \"Error: LogArchive output fileset is wrong.\")\n\n topLevelFileset = Fileset(name=\"TestWorkload-MonteCarloFromGEN-SomeBlock\")\n topLevelFileset.loadData()\n\n procSubscription = Subscription(fileset=topLevelFileset, workflow=procWorkflow)\n procSubscription.loadData()\n\n self.assertEqual(procSubscription[\"type\"], \"Production\",\n \"Error: Wrong subscription 
type: %s\" % procSubscription[\"type\"])\n self.assertEqual(procSubscription[\"split_algo\"], \"EventAwareLumiBased\",\n \"Error: Wrong split algo.\")\n\n unmergedReco = Fileset(name=\"/TestWorkload/MonteCarloFromGEN/unmerged-outputRECORECO\")\n unmergedReco.loadData()\n recoMergeWorkflow = Workflow(name=\"TestWorkload\",\n task=\"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMergeoutputRECORECO\")\n recoMergeWorkflow.load()\n mergeSubscription = Subscription(fileset=unmergedReco, workflow=recoMergeWorkflow)\n mergeSubscription.loadData()\n\n self.assertEqual(mergeSubscription[\"type\"], \"Merge\",\n \"Error: Wrong subscription type.\")\n self.assertEqual(mergeSubscription[\"split_algo\"], \"ParentlessMergeBySize\",\n \"Error: Wrong split algo: %s\" % mergeSubscription[\"split_algo\"])\n\n unmergedAlca = Fileset(name=\"/TestWorkload/MonteCarloFromGEN/unmerged-outputALCARECOALCARECO\")\n unmergedAlca.loadData()\n alcaMergeWorkflow = Workflow(name=\"TestWorkload\",\n task=\"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMergeoutputALCARECOALCARECO\")\n alcaMergeWorkflow.load()\n mergeSubscription = Subscription(fileset=unmergedAlca, workflow=alcaMergeWorkflow)\n mergeSubscription.loadData()\n\n self.assertEqual(mergeSubscription[\"type\"], \"Merge\",\n \"Error: Wrong subscription type.\")\n self.assertEqual(mergeSubscription[\"split_algo\"], \"ParentlessMergeBySize\",\n \"Error: Wrong split algo: %s\" % mergeSubscription[\"split_algo\"])\n\n for procOutput in [\"outputRECORECO\", \"outputALCARECOALCARECO\"]:\n unmerged = Fileset(name=\"/TestWorkload/MonteCarloFromGEN/unmerged-%s\" % procOutput)\n unmerged.loadData()\n cleanupWorkflow = Workflow(name=\"TestWorkload\",\n task=\"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENCleanupUnmerged%s\" % procOutput)\n cleanupWorkflow.load()\n cleanupSubscription = Subscription(fileset=unmerged, workflow=cleanupWorkflow)\n cleanupSubscription.loadData()\n\n self.assertEqual(cleanupSubscription[\"type\"], \"Cleanup\",\n \"Error: Wrong subscription type.\")\n self.assertEqual(cleanupSubscription[\"split_algo\"], \"SiblingProcessingBased\",\n \"Error: Wrong split algo.\")\n\n procLogCollect = Fileset(name=\"/TestWorkload/MonteCarloFromGEN/unmerged-logArchive\")\n procLogCollect.loadData()\n procLogCollectWorkflow = Workflow(name=\"TestWorkload\",\n task=\"/TestWorkload/MonteCarloFromGEN/LogCollect\")\n procLogCollectWorkflow.load()\n logCollectSub = Subscription(fileset=procLogCollect, workflow=procLogCollectWorkflow)\n logCollectSub.loadData()\n\n self.assertEqual(logCollectSub[\"type\"], \"LogCollect\",\n \"Error: Wrong subscription type.\")\n self.assertEqual(logCollectSub[\"split_algo\"], \"MinFileBased\",\n \"Error: Wrong split algo.\")\n\n procLogCollect = Fileset(\n name=\"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMergeoutputRECORECO/merged-logArchive\")\n procLogCollect.loadData()\n procLogCollectWorkflow = Workflow(name=\"TestWorkload\",\n task=\"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMergeoutputRECORECO/MonteCarloFromGENoutputRECORECOMergeLogCollect\")\n procLogCollectWorkflow.load()\n logCollectSub = Subscription(fileset=procLogCollect, workflow=procLogCollectWorkflow)\n logCollectSub.loadData()\n\n self.assertEqual(logCollectSub[\"type\"], \"LogCollect\",\n \"Error: Wrong subscription type.\")\n self.assertEqual(logCollectSub[\"split_algo\"], \"MinFileBased\",\n \"Error: Wrong split algo.\")\n\n procLogCollect = Fileset(\n 
name=\"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMergeoutputALCARECOALCARECO/merged-logArchive\")\n procLogCollect.loadData()\n procLogCollectWorkflow = Workflow(name=\"TestWorkload\",\n task=\"/TestWorkload/MonteCarloFromGEN/MonteCarloFromGENMergeoutputALCARECOALCARECO/MonteCarloFromGENoutputALCARECOALCARECOMergeLogCollect\")\n procLogCollectWorkflow.load()\n logCollectSub = Subscription(fileset=procLogCollect, workflow=procLogCollectWorkflow)\n logCollectSub.loadData()\n\n self.assertEqual(logCollectSub[\"type\"], \"LogCollect\",\n \"Error: Wrong subscription type.\")\n self.assertEqual(logCollectSub[\"split_algo\"], \"MinFileBased\",\n \"Error: Wrong split algo.\")\n\n return\n\n def testMCFromGENWithPileup(self):\n \"\"\"\n _testMonteCarloFromGEN_\n\n Create a MonteCarloFromGEN workflow and verify it installs into WMBS\n correctly.\n \"\"\"\n arguments = MonteCarloFromGENWorkloadFactory.getTestArguments()\n arguments[\"ConfigCacheID\"] = self.injectConfig()\n arguments[\"CouchDBName\"] = \"mclhe_t\"\n arguments[\"PrimaryDataset\"] = \"WaitThisIsNotMinimumBias\"\n\n # Add pileup inputs\n arguments[\"MCPileup\"] = \"/some/cosmics-procstringwhatever-v1/RAW\"\n arguments[\"DataPileup\"] = \"/some/minbias-procstringwhatever-v1/LHE\"\n arguments[\"DeterministicPileup\"] = True\n\n factory = MonteCarloFromGENWorkloadFactory()\n testWorkload = factory.factoryWorkloadConstruction(\"TestWorkload\", arguments)\n\n productionTask = testWorkload.getTaskByPath('/TestWorkload/MonteCarloFromGEN')\n cmsRunStep = productionTask.getStep(\"cmsRun1\").getTypeHelper()\n pileupData = cmsRunStep.getPileup()\n self.assertEqual(pileupData.data.dataset, [\"/some/minbias-procstringwhatever-v1/LHE\"])\n self.assertEqual(pileupData.mc.dataset, [\"/some/cosmics-procstringwhatever-v1/RAW\"])\n\n splitting = productionTask.jobSplittingParameters()\n self.assertTrue(splitting[\"deterministicPileup\"])\n\n def testMemCoresSettings(self):\n \"\"\"\n _testMemCoresSettings_\n\n Make sure the multicore and memory setings are properly propagated to\n all tasks and steps.\n \"\"\"\n defaultArguments = MonteCarloFromGENWorkloadFactory.getTestArguments()\n defaultArguments[\"ConfigCacheID\"] = self.injectConfig()\n defaultArguments[\"CouchDBName\"] = \"mclhe_t\"\n defaultArguments[\"PrimaryDataset\"] = \"WaitThisIsNotMinimumBias\"\n\n factory = MonteCarloFromGENWorkloadFactory()\n testWorkload = factory.factoryWorkloadConstruction(\"TestWorkload\", defaultArguments)\n\n # test default values\n taskObj = testWorkload.getTask('MonteCarloFromGEN')\n for step in ('cmsRun1', 'stageOut1', 'logArch1'):\n stepHelper = taskObj.getStepHelper(step)\n self.assertEqual(stepHelper.getNumberOfCores(), 1)\n self.assertEqual(stepHelper.getNumberOfStreams(), 0)\n # then test Memory requirements\n perfParams = taskObj.jobSplittingParameters()['performance']\n self.assertEqual(perfParams['memoryRequirement'], 2300.0)\n\n # now test case where args are provided\n defaultArguments[\"Multicore\"] = 6\n defaultArguments[\"Memory\"] = 4600.0\n defaultArguments[\"EventStreams\"] = 3\n testWorkload = factory.factoryWorkloadConstruction(\"TestWorkload\", defaultArguments)\n taskObj = testWorkload.getTask('MonteCarloFromGEN')\n for step in ('cmsRun1', 'stageOut1', 'logArch1'):\n stepHelper = taskObj.getStepHelper(step)\n if step == 'cmsRun1':\n self.assertEqual(stepHelper.getNumberOfCores(), defaultArguments[\"Multicore\"])\n self.assertEqual(stepHelper.getNumberOfStreams(), defaultArguments[\"EventStreams\"])\n else:\n 
self.assertEqual(stepHelper.getNumberOfCores(), 1)\n self.assertEqual(stepHelper.getNumberOfStreams(), 0)\n # then test Memory requirements\n perfParams = taskObj.jobSplittingParameters()['performance']\n self.assertEqual(perfParams['memoryRequirement'], defaultArguments[\"Memory\"])\n\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/python/WMCore_t/WMSpec_t/StdSpecs_t/MonteCarloFromGEN_t.py","file_name":"MonteCarloFromGEN_t.py","file_ext":"py","file_size_in_byte":16483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"266115607","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\"\"\"\nA simple, one-off, experimental script to get OCLC Work IDs (OWIs) into a batch of bib records. Here, they went into 787$o.\nUses pymarc, libxml2 and xmllint.\nNOTE: There's a quota of 1,000 queries per day by default (this isn't immediately obvious). Check the following:\nhttp://oclc.org/developer/develop/linked-data/worldcat-entities/worldcat-work-entity.en.html\nhttp://www.oclc.org/developer/develop/web-services/xid-api.en.html\n\"\"\"\nfrom time import sleep\nimport libxml2\nimport os\nimport pickle\nimport pymarc\nimport requests\nimport shelve\nimport subprocess\nimport sys\n\nXID_RESOLVER = \"http://xisbn.worldcat.org/webservices/xid/oclcnum/%s\"\nWORK_ID = \"http://worldcat.org/entity/work/id/\"\nSHELF_FILE = \"./owi.db\"\n\ninfile = \"./input.marc.xml\"\noutfile = \"./output_w_owis.marc.xml\"\n\ndef check_shelf(ocn):\n\tshelf = shelve.open(SHELF_FILE, protocol=pickle.HIGHEST_PROTOCOL)\n\tif ocn in shelf:\n\t\tworkid = shelf[ocn]\n\t\tos.sys.stdout.write(\"[Cache] Found: \" + ocn + \"\\n\") \n\telse:\n\t\tworkid = query_oclc(ocn)\n\t\tif workid != None and workid != '':\n\t\t\tshelf[ocn] = workid\n\t\t\tos.sys.stdout.write('put %s %s into db\\n' % (ocn,workid))\n\tshelf.close()\n\treturn workid\n\ndef query_oclc(xid):\n\t'''\n\tSee the following for parameters:\n\thttp://www.oclc.org/developer/develop/web-services/xid-api/xstandardNumber-resource.en.html\n\t'''\n\tto_get = XID_RESOLVER % xid\n\tto_get += \"?method=getMetadata&format=xml&fl=*\" # could also try &fl=owi\n\tprint(to_get) # uncomment to get the full request URI\n\theaders = {\"Accept\":\"application/xml\"}\n\tresp = requests.get(to_get, headers=headers, allow_redirects=True)\n\tif resp.status_code == 200:\n\t\tdoc = libxml2.parseDoc(resp.text.encode(\"UTF-8\", errors=\"ignore\"))\n\t\tctxt = doc.xpathNewContext()\n\t\tif ctxt.xpathEval(\"//@stat[.='overlimit']\"):\n\t\t\tprint(\"over limit with %s\" % xid)\n\t\t\tsys.exit() \n\t\telse: \n\t\t\ttry:\n\t\t\t\towi = ctxt.xpathEval(\"//@owi\")[0].content\n\t\t\t\tcleanowi = owi.replace('owi','')\n\t\t\t\treturn WORK_ID + cleanowi\n\t\t\texcept:\n\t\t\t\tprint(\"no owi found\")\n\t\t\t\t\n\telif resp.status_code == 404:\n\t\tmsg = \"Not found: %s%s\" % (xid, os.linesep)\n\telif resp.status_code == 500:\n\t\tmsg = \"Server error (%s)\" % xid\n\telse: # resp.status_code isn't 200, 404 or 500:\n\t\tmsg = \" Response for %s was \" % xid\n\t\tmsg += \"%s%s\" % (resp.status_code, os.linesep)\n\tprint(msg)\n\t\n\tsleep(1)\n\t\n\t\nif __name__ == \"__main__\":\n\tmrxheader = \"\"\"\n\"\"\"\n\tfh = open('out/owi_tmp.xml', 'w+')\n\tfh.write(mrxheader)\n\treader = pymarc.marcxml.parse_xml_to_array(infile)\n\tfor rec in reader:\n\t\tfor n in rec.get_fields('035'):\n\t\t\tfor s in n.get_subfields('a'):\n\t\t\t\tif 'OCoLC' in s:\n\t\t\t\t\tnum = s.replace('(OCoLC)','')\n\t\t\t\t\tworkid = check_shelf(str(num))\n\t\tif 
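# A compact sketch of the shelve-backed cache that check_shelf() above
# implements: consult the local shelf first and call the remote fetch only
# on a miss. The fetch callable and shelf path are illustrative stand-ins.
import shelve

def cached_lookup(key, fetch, shelf_path="cache.db"):
    with shelve.open(shelf_path) as shelf:
        if key in shelf:
            return shelf[key]
        value = fetch(key)
        if value:
            shelf[key] = value
        return value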
workid != None and workid != '':\n\t\t\tfield = pymarc.Field(\n\t\t\t\ttag = '787', \n\t\t\t\tindicators = ['0',' '],\n\t\t\t\tsubfields = [\n\t\t\t\t\t'o', str(workid)\n\t\t\t\t])\n\t\t\trec.add_field(field)\n\t\tworkid = \"\"\n\t\tout = \"%s\" % (pymarc.record_to_xml(rec))\n\t\tfh.write(out)\n\tfh.write(\"\")\n\tfh.close()\n\t# format output for readability\n\tsubprocess.Popen(['xmllint','--format','-o', outfile,'owi_tmp.xml'])\n","sub_path":"owi.py","file_name":"owi.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"303403117","text":"import csv\nfrom pymongo import MongoClient\nimport re\nfrom datetime import datetime\n\nconnection = MongoClient()\ndb = connection.db_hw_mongo\n\n\nclass MongoDB:\n def __init__(self, csv_file, database, name):\n self.csv_file = csv_file\n self.database = database\n self.name = name\n\n def read_data(self):\n with open(self.csv_file, 'r', encoding='utf8') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n row_dict = {'Исполнитель': row[0],\n \"Цена\": int(row[1]),\n \"Место\": row[2],\n \"Дата\": datetime.strptime(row[3], '%d.%m')}\n self.database.artist.insert_one(row_dict)\n\n def find_cheapest(self):\n return self.database.artist.find().sort([('Цена', 1)])\n\n def find_by_name(self):\n regex = re.compile(self.name)\n return self.database.artist.find({'Исполнитель': regex})\n\n\ndata = MongoDB(csv_file='artists.csv', database=db, name=input(\"Введите название группы \"))\ndata.read_data()\nprint(data.find_cheapest())\nprint(data.find_by_name())\n","sub_path":"ORM_MongoDB.py","file_name":"ORM_MongoDB.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"546017418","text":"from unittest import TestCase\n\nfrom tests import get_data\nfrom pytezos.operation.forge import forge_operation_group\n\n\nclass OperationForgingTestoohafK(TestCase):\n\n def setUp(self):\n self.maxDiff = None\n \n def test_forge_oohafK(self):\n expected = get_data(\n path='operations/oohafKVbkS3Xv98MN9js66nnM1aWgfaKGfsEaMgmNQfyPSJ7pfN/forged.hex')\n actual = forge_operation_group(get_data(\n path='operations/oohafKVbkS3Xv98MN9js66nnM1aWgfaKGfsEaMgmNQfyPSJ7pfN/unsigned.json'))\n self.assertEqual(expected, actual)\n","sub_path":"tests/operations/oohafKVbkS3Xv98MN9js66nnM1aWgfaKGfsEaMgmNQfyPSJ7pfN/test_forge_oohafK.py","file_name":"test_forge_oohafK.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"292581847","text":"\nimport torch\nfrom pytorch_mnist import LeNet\nimport torchvision as tv\nimport torchvision.transforms as transforms\nimport cv2\nimport numpy as np\n\nimport rospy\nfrom geometry_msgs.msg import Pose, Point, Quaternion\n\n\nmodel_name = 'model/net_012.pth'\n# 定义是否使用GPU\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n# 加载LeNet在mnist数据集上训练得到的模型\ndef load_mnist_model():\n # 定义网络模型LeNet\n net = LeNet().to(device)\n # 加载硬盘上的参数文件\n checkpoint = torch.load(model_name)\n # 加载参数到网络\n net.load_state_dict(checkpoint)\n\n return net\n\n\n# 将4个点组成的轮廓曲线画出\ndef draw_approx_curve(img, approx):\n for i in range(len(approx) - 1):\n cv2.line(img, (approx[i,0,0], approx[i,0,1]), (approx[i+1,0,0], approx[i+1,0,1]), (0, 0, 255), 2)\n cv2.line(img, (approx[0,0,0], approx[0,0,1]), (approx[-1,0,0], approx[-1,0,1]), (0, 0, 255), 2)\n\n\n# 计算两个点之间的欧式距离\ndef 
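# An equivalent vectorised form of the Euclidean distance computed by the
# dis_points() helper defined just below; numpy only, inputs illustrative.
import numpy as np

def euclidean(p1, p2):
    return float(np.linalg.norm(np.squeeze(p1) - np.squeeze(p2)))

# euclidean([0, 0], [3, 4]) == 5.0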
dis_points(p1, p2):\n p1 = np.squeeze(p1)\n p2 = np.squeeze(p2)\n dist = np.sqrt((p1[0]-p2[0])*(p1[0]-p2[0]) + (p1[1]-p2[1])*(p1[1]-p2[1]))\n return dist\n\n\n# 对图像中的4个点进行排序,顺序为左上,左下,右下,右上\ndef sort4points(points):\n lmin = 1e6\n lmax = 0\n imin = 0\n imax = 0\n for i in range(4):\n if points[i, 0] + points[i, 1] < lmin:\n lmin = points[i, 0] + points[i, 1]\n imin = i\n if points[i, 0] + points[i, 1] > lmax:\n lmax = points[i, 0] + points[i, 1]\n imax = i\n lx = 1e6\n ix = 0\n for i in range(4):\n if i != imin and i != imax:\n if points[i, 0] < lx:\n lx = points[i, 0]\n ix = i\n for i in range(4):\n if i != imin and i != imax and i != ix:\n iy = i\n newpts = np.zeros_like(points)\n newpts[0] = points[imin]\n newpts[1] = points[iy]\n newpts[2] = points[imax]\n newpts[3] = points[ix]\n return newpts\n\n\n# 提取图像中的方框,并进行手写数字识别\ndef box_extractor(img, net):\n # 图像边缘提取,使用Canny边缘检测算法\n edges = cv2.Canny(img, 100, 200)\n # 在边缘中查找图像中的封闭轮廓\n cnts, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # 下面这行代码可以将封闭轮廓画出,用于调试\n # img = cv2.drawContours(img, cnts, -1, (0, 255, 0), 1)\n cx, cy = 0, 0\n # 遍历每条封闭轮廓\n for cnt in cnts:\n # 求轮廓周长\n peri = cv2.arcLength(cnt, True)\n # 对轮廓进行多边形拟合\n approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)\n # 找到4边型\n if len(approx) == 4 and cv2.isContourConvex(approx):\n edges = np.zeros(4, np.float32)\n # 计算4个边的边长\n edges[0] = dis_points(approx[0], approx[1])\n edges[1] = dis_points(approx[1], approx[2])\n edges[2] = dis_points(approx[2], approx[3])\n edges[3] = dis_points(approx[3], approx[0])\n # 计算4个边的最小值,进制,标准差\n e_min = np.min(edges)\n e_avg = np.mean(edges)\n e_std = np.std(edges)\n\n # 对4边形 进行约束\n if e_min > 10 and e_std / e_avg < 0.2:\n draw_approx_curve(img, approx)\n\n cx = (approx[0,0,0] + approx[1,0,0] + approx[2,0,0] + approx[3,0,0]) / 4.\n cy = (approx[0,0,1] + approx[1,0,1] + approx[2,0,1] + approx[3,0,1]) / 4.\n # 目标框在垂直正视视角的4个点的坐标\n pts_res = np.float32([[0, 0], [28, 0], [28, 28], [0, 28]])\n approx = np.squeeze(approx).astype(np.float32)\n # 调整四边形4个点的顺序,为左上,左下,右下,右上\n approx = sort4points(approx)\n # 进行视角变换,将目标框转换为垂直正视视角\n M = cv2.getPerspectiveTransform(approx, pts_res)\n N = cv2.warpPerspective(img, M, (28, 28), cv2.INTER_NEAREST)\n # 目标框的图像转换为灰度图\n N_gray = cv2.cvtColor(N, cv2.COLOR_BGR2GRAY).astype(np.float32)\n # 像素值归一化到0-1之间\n N_gray /= 255\n # 将图像转换为黑底白字,与mnist数据集中的样本相同\n N_gray = 1 - N_gray\n # 显示图像,为了测试使用\n cv2.imshow(\"N\", N_gray)\n # 将图像转换为网络输入所需形状(1x1x28x28)\n N_gray = np.expand_dims(np.expand_dims(N_gray, axis=0), axis=0)\n # 检测代码不需要自动梯度计算\n with torch.no_grad():\n N_in = torch.from_numpy(N_gray)\n N_in = N_in.to(device)\n outputs = net(N_in)\n # 网络的输出通过.cpu().numpy()转换为numpy格式,可以进行正常操作\n num = np.argmax(outputs.cpu().numpy())\n # 已经获得正确的数字检测结果\n # 在这里你可以加入任何任务级的代码 -----------------\n\n # 把检测结果在图像左上角显示出来\n cv2.putText(img, 'num: %d' % num, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 4)\n pass\n\n print((cx, cy))\n return img, (cx, cy)\n\n\nif __name__ == '__main__':\n # 打开摄像头0\n cap = cv2.VideoCapture(0)\n # 加载网络参数,为了手写数字识别\n net = load_mnist_model()\n\n rospy.init_node('vision', anonymous=True)\n pub = rospy.Publisher('/vision/position', Pose, queue_size=10)\n\n while True:\n # 读摄像头一帧\n state, frame = cap.read()\n # 提取图像中的方框,并进行手写数字识别\n frame, cxcy = box_extractor(frame, net)\n # 显示图像\n\n pose = Pose(Point(cxcy[0], cxcy[1], 0), Quaternion(0., 0., 0., 0.))\n pub.publish(pose)\n cv2.imshow(\"capture\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # 释放内存\n cap.release()\n 
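# A compact sketch of the perspective rectification box_extractor() performs
# above: map the four corner points (ordered TL, TR, BR, BL, as sort4points
# returns them) onto a 28x28 square. OpenCV plus numpy; the source quad is
# illustrative, and the warp call is shown with the keyword flags argument.
import cv2
import numpy as np

src = np.float32([[10, 12], [120, 15], [118, 130], [8, 125]])
dst = np.float32([[0, 0], [28, 0], [28, 28], [0, 28]])
M = cv2.getPerspectiveTransform(src, dst)
# patch = cv2.warpPerspective(image, M, (28, 28), flags=cv2.INTER_NEAREST)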
cv2.destroyAllWindows()\n","sub_path":"pytorch_mnist_camera.py","file_name":"pytorch_mnist_camera.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"36795050","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 25 15:02:34 2019\n\n@author: S534629\n\"\"\"\n#below code is to store wikipedia data into csv file\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\nURL=\"https://en.wikipedia.org/wiki/List_of_largest_pharmaceutical_companies_by_revenue\"\nresponse=requests.get(URL)\nsoup=BeautifulSoup(response.text, 'html.parser')\n\ntable= soup.find('table',{'class':'sortable wikitable'}).tbody\n\nrows=table.find_all('tr')\n\ncolumns=[v.text.replace('\\n','') for v in rows[0].find_all('th')]\n\ndf= pd.DataFrame(columns=columns)\n\nfor i in range(1, len(rows)):\n tds=rows[i].find_all('td')\n \n if len(tds)==4:\n values=[tds[0].text,tds[1].text.replace('\\n','').replace('\\xa0',''),'',\n tds[2].text.replace('\\n','').replace('\\xa0',''),'','','','','','','','']\n else:\n values=[td.text.replace('\\n','').replace('\\xa0','') for td in tds]\n \n \n df=df.append(pd.Series(values, index=columns), ignore_index=True)\n \n df.to_csv(r'C:\\Users\\S534629\\Downloads\\Web mining\\Assignments\\Project'+\n 'pharmatop10.csv',index=False)\n \n# Plotting the graph with the help of above obtained data\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\ndef readcsv(filename):\n data = pd.read_csv(filename) \n return(np.array(data))\n\ndata1=readcsv('ProjectpharmaTop10.csv')\n\n#storing different companies yearwise revenues from the csv table\nJNJ=[data1[0][4],data1[0][5],data1[0][6]]\nRoche=[data1[1][4],data1[1][5],data1[1][6]]\npfizer=[data1[2][4],data1[2][5],data1[2][6]]\nNovartis=[data1[3][4],data1[3][5],data1[3][6]]\nBayer=[data1[4][4],data1[4][5],data1[4][6]]\n\n#storing years of revenue\nyears=['2016','2017','2018']\n\n#plotting a line graph\nplt.plot(years,JNJ[::-1],'b.-',label='JNJ',marker=\"s\")\nplt.plot(years,Roche[::-1],'r.-',label='Roche',marker=\"P\")\nplt.plot(years,pfizer[::-1],'m.-',label='pfizer',marker=\"d\")\nplt.plot(years,Novartis[::-1],'y.-',label='Novartis',marker=\"o\")\nplt.plot(years,Bayer[::-1],'c.-',label='Bayer',marker=\"*\")\n\nplt.title('Pharma Company and their Revenue in last 3 years',fontweight='bold')\nplt.ylabel('Revenue in USD billions',fontweight='bold')\nplt.xlabel('Years',fontweight='bold')\nplt.ylim(0,90)\nplt.legend()\nplt.savefig('pharma_graph.png',transparent='True')\n","sub_path":"poster project.py","file_name":"poster project.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"288637976","text":"from collections import deque\n\ndef breadth_first_search(start_node):\n\n frontier = deque([[start_node]])\n\n while len(frontier) != 0:\n\n path = frontier.popleft()\n last_node = path[-1]\n\n if (last_node.is_goal()):\n print(\"BFS found path: \")\n # print(path)\n return path\n else:\n neighbours = last_node.get_neighbours()\n\n for n in neighbours:\n if not (n in path):\n new_path = path.copy() + [n]\n frontier.append(new_path)\n return []","sub_path":"No4/search_algorithms/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"599266995","text":"from les.mp_model.mp_model import MPModel\n\nclass AddOnsMPM:\n \n 
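# A note on the breadth_first_search() snippet earlier in this batch: the
# "n in path" guard blocks cycles inside one path but still re-expands nodes
# reached via different paths, which can blow up on dense graphs. The usual
# fix is a global visited set; a sketch assuming the same node API
# (is_goal/get_neighbours) and hashable nodes:
from collections import deque

def bfs(start):
    frontier = deque([[start]])
    visited = {start}
    while frontier:
        path = frontier.popleft()
        node = path[-1]
        if node.is_goal():
            return path
        for n in node.get_neighbours():
            if n not in visited:
                visited.add(n)
                frontier.append(path + [n])
    return []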
def __init__(self):\n self.addons = 1\n \n def to_knapsack(self, model):\n Sobj = sum(model.get_objective_coefficients())\n Scon = model.get_rows_coefficients().sum(0).tolist()[0]\n Srhs = sum(model.get_rows_rhs())\n vrs = model.get_variables_names()\n return vrs, Scon, Srhs, Sobj/sum(Scon)\n","sub_path":"src/main/python/les/drivers/greedy_driver/to_knapsack.py","file_name":"to_knapsack.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"201651323","text":"import cv2\r\nimport numpy as np\r\nimport 모드\r\nimport 색\r\n\r\n\r\ndef main_proc(image):\r\n height, width = image.shape[:2]\r\n area = height * width\r\n height03 = height * 0.1\r\n width03 = width * 0.3\r\n\r\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\r\n data = hsv.reshape((-1,3)).astype(np.float32)\r\n K = 2\r\n term_crit = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\r\n ret, labels, centers = cv2.kmeans(data, K, None, term_crit, 5, cv2.KMEANS_RANDOM_CENTERS)\r\n centers = np.uint8(centers)\r\n res = centers[labels.flatten()]\r\n dst = res.reshape(image.shape)\r\n\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)\r\n blur = cv2.GaussianBlur(gray, (7, 7), 0)\r\n edged = cv2.Canny(blur, 30, 50)\r\n kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize= (5,5))\r\n dilated = cv2.dilate(edged, kernel, iterations=10)\r\n (_, contours, _) = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\r\n\r\n score = []\r\n for cnt in contours:\r\n # cv2.drawContours(image, [cnt], -1, 색.파, 3)\r\n x, y, w, h = cv2.boundingRect(cnt)\r\n if (w * h) / area < 0.1 and not (w > width03 or h > height03): continue\r\n cv2.drawContours(image, [cnt], -1, 색.시안, 3)\r\n # cv2.rectangle(image, (x, y), (x + w, y + h), 색.빨, 3)\r\n #\r\n # poly = cv2.approxPolyDP(cnt, epsilon=20, closed=True)\r\n # cv2.drawContours(image, [poly], -1, 색.초, 3)\r\n\r\n # 출력\r\n return {\r\n 'image': image,\r\n # 'gray': gray,\r\n # 'edged': edged,\r\n 'dilated': dilated,\r\n 'dst': dst\r\n }\r\n\r\n\r\nr = 모드.사진(main_proc, 0.3)\r\nr.run()\r\n\r\n","sub_path":"pylab/beta lab/#7. 클러스터링 분할.py","file_name":"#7. 클러스터링 분할.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"591515711","text":"\n\nfrom xai.brain.wordbase.verbs._outvote import _OUTVOTE\n\n#calss header\nclass _OUTVOTES(_OUTVOTE, ):\n\tdef __init__(self,): \n\t\t_OUTVOTE.__init__(self)\n\t\tself.name = \"OUTVOTES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"outvote\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_outvotes.py","file_name":"_outvotes.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"254703226","text":"\"\"\" Compiled: 2020-08-14 16:40:05 \"\"\"\n\n#__src_file__ = \"extensions/arc_writers/./etc/FMarketRiskExportLogSettingsTab.py\"\n\"\"\"----------------------------------------------------------------------------\nMODULE\n FMarketRiskExportLogSettingsTab - General output settings\n\n (c) Copyright 2019 by FIS FRONT ARENA. All rights reserved.\n\nDESCRIPTION\n\n This is a GUI tab in the FMarketRiskExport GUI which contains settings\n which are changed frequently, e.g. 
name of the report.\n\n----------------------------------------------------------------------------\"\"\"\n\nimport FRunScriptGUI\nimport FBDPWorld\n\nclass MarketRiskExportLogSettingsTab(FRunScriptGUI.AelVariablesHandler):\n\n def logfile_cb(self, index, fieldValues):\n self.Logfile.enable(fieldValues[index], 'You have to check Log To '\n 'File to be able to select a Logfile.')\n return fieldValues\n\n def sendReportByMail_cb(self, index, fieldValues):\n tt = ('This field is only applicable if Send Report By Mail is '\n 'selected.')\n self.MailList.enable(fieldValues[index], tt)\n self.ReportMessageType.enable(fieldValues[index], tt)\n return fieldValues\n\n def reportMessageType_cb(self, index, fieldValues):\n if 'Full Log' in fieldValues[index]:\n fieldValues[index] = 'Full Log'\n return fieldValues\n\n def __init__(self):\n\n ttLogMode = 'Defines the amount of logging produced.'\n ttLogToCon = ('Whether logging should be done in the Log Console or '\n 'not.')\n ttLogToFile = 'Defines whether logging should be done to file.'\n ttLogFile = ('Name of the logfile. Could include the whole path, '\n 'c:\\log\\...')\n ttSendReportByMail = ('Send reports by email when procedure is '\n 'finished.')\n ttMailList = ('Specify mail recipients. Specify them in the form: '\n 'user1@address.com, user2@address.com.')\n ttReportMessageType = ('Whether the report should be the full log, or '\n 'if it should be only the selected messagetypes. If the '\n 'selected messagetypes does not occur, no mail will be sent.')\n\n messageTypes = ['Full Log', 'START', 'FINISH', 'ABORT', 'ERROR',\n 'WARNING', 'NOTIME', 'INFO', 'DEBUG']\n variables = [\n # [VariableName,\n # DisplayName,\n # Type, CandidateValues, Default,\n # Mandatory, Multiple, Description, InputHook, Enabled]\n ['Logmode',\n 'Logmode_Logging',\n 'int', [0, 1, 2], 0,\n 2, 0, ttLogMode],\n ['LogToConsole',\n 'Log to console_Logging',\n 'int', [1, 0], 1,\n 1, 0, ttLogToCon],\n ['LogToFile',\n 'Log to file_Logging',\n 'int', [1, 0], 1,\n 1, 0, ttLogToFile, self.logfile_cb],\n ['Logfile',\n 'Logfile_Logging',\n 'string', None, 'c:\\\\temp\\\\market_risk_export.log',\n 0, 0, ttLogFile, None, None],\n ['SendReportByMail',\n 'Send report by mail_Logging',\n 'int', [1, 0], None,\n 0, 0, ttSendReportByMail, self.sendReportByMail_cb],\n ['MailList',\n 'MailList_Logging',\n 'string', None, None,\n 0, 0, ttMailList],\n ['ReportMessageType',\n 'ReportMessageType_Logging',\n 'string', messageTypes, 'Full Log',\n 2, 1, ttReportMessageType, self.reportMessageType_cb]\n ]\n FRunScriptGUI.AelVariablesHandler.__init__(self, variables, __name__)\n\n\ndef getAelVariables():\n outtab = MarketRiskExportLogSettingsTab()\n outtab.LoadDefaultValues(__name__)\n return outtab\n\ndef logger_setup(ael_variables):\n logger = FBDPWorld.CreateWorld(ael_variables)\n return logger\n","sub_path":"Extensions/ARC Market Risk Exporter FIS/FPythonCode/FMarketRiskExportLogSettingsTab.py","file_name":"FMarketRiskExportLogSettingsTab.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"459889350","text":"from common_defs import *\n\nfrom weka.classifiers import Classifier\n\nspace = {\n 'rule': hp.choice('r', ('AVG', 'PROD', 'MAJ', 'MIN', 'MAX')),\n 'seed': hp.quniform('s', 1, 1, 1),\n}\n\n\ndef get_params():\n params = sample(space)\n return handle_integers(params)\n\n\ndef get_class(params):\n # pprint(params)\n\n L = list([])\n\n L.append(\"-R\")\n L.append(params['rule'])\n\n L.append(\"-S\")\n 
L.append(str(params['seed']))\n\n\n clf = Classifier(classname=\"weka.classifiers.meta.Vote\", options=L)\n return clf\n\n\ndef try_params(n_instances, params, train, test, istest):\n n_instances = int(round(n_instances))\n pprint(params)\n\n L = list([])\n\n L.append(\"-R\")\n L.append(params['rule'])\n\n L.append(\"-S\")\n L.append(str(params['seed']))\n\n\n clf = Classifier(classname=\"weka.classifiers.meta.Vote\", options=L)\n\n\n if istest:\n result = test_weka_classifier(clf, train, test)\n else:\n result = train_and_eval_weka_classifier(clf, train, n_instances)\n\n\n return result\n","sub_path":"defs/meta/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"489492246","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n gsliu 2017-03-03\n \n\"\"\"\nimport re\nimport urllib.request\n\nfrom bs4 import BeautifulSoup\n\npage = 1\nurl = 'http://www.qiushibaike.com/hot/8hr/page/' + str(page)\nif page > 1:\n url = url + '/?s=4961777'\nuser_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\nheaders = {'User-Agent': user_agent}\ntry:\n request = urllib.request.Request(url, headers=headers)\n response = urllib.request.urlopen(request)\n content = response.read().decode('utf-8')\n # # content = content.replace('
<br/>', '\\n')\n    # pattern = re.compile('.*?(.*?).*?'\n    #                          '.*?(.*?).*?(.*?)', re.S)\n    # items = re.findall(pattern, content)\n    # for item in items:\n    #     rep = re.compile('<br/>
')\n # text = re.sub(rep, '\\n', item[1])\n # print(item[0] + ':', '\\n', text, '\\n', u'点赞:' + item[2], '\\n')\n soup = BeautifulSoup(content)\n\n authors = soup.find_all(class_=\"article block untagged mb15\")\n for authou in authors:\n main = authou.find(class_=\"content\")\n if main.span.string is None:\n continue\n print('名字:', authou.h2.string)\n print('内容:', main.span.string)\n number = authou.find(class_=\"number\")\n print('点赞数:', number.string)\n print('')\n\nexcept urllib.request.URLError as e:\n if hasattr(e, \"code\"):\n print(e.code)\n if hasattr(e, \"reason\"):\n print(e.reason)\n","sub_path":"py2/crawler_test.py","file_name":"crawler_test.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"436775972","text":"\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nwith open(\"static/adf.pkl\", \"rb\") as f:\n adf = pickle.load(f)\n\nwith open(\"static/lr.pkl\", \"rb\") as f:\n lr = pickle.load(f)\n\ndef get_random_craft(df=adf):\n \"\"\"\n Input: DataFrame of aircraft in NTSB investigations.\n\n Output: Single, randomly selected row.\n \"\"\"\n features = ['homebuilt_coded', 'light_out', 'airframe_hours', 'gross_weight',\n 'visibility', 'wind_velocity', 'mid_air_acc', 'is_plane', 'instructional_flight',\n 'personal_flight', 'pilot_hours', 'pro_pilot', 'pilot_age', 'cert_level', \n 'seats', 'scnd_pilot', 'since_inspection', 'airport_dist', 'y']\n craft = df.sample(1)\n return craft[features]\n\ndef to_yn(feature):\n if feature==\"1\":\n return \"Yes\"\n else:\n return \"No\"\n\ndef cert_decode(feature):\n if feature==\"1\":\n return \"Basic\"\n elif feature==\"2\":\n return \"Sport\"\n elif feature==\"3\":\n return \"Class 3\"\n elif feature==\"4\":\n return \"Class 2\"\n elif feature==\"5\":\n return \"Class 1\"\n else:\n return \"None/unknown\"\n\ndef get_display_features(craft):\n craft = craft.applymap(str)\n for var in ['is_plane', 'homebuilt_coded', 'pro_pilot', 'scnd_pilot',\n 'mid_air_acc', 'instructional_flight', 'personal_flight',\n 'light_out']:\n craft[var][0] = to_yn(craft[var][0])\n craft['cert_level'][0] = cert_decode(craft['cert_level'][0])\n return craft\n\ndef get_prediction(craft):\n prediction = lr.predict(craft.iloc[:, :-1])[0]\n confidence = lr.predict_proba(craft.iloc[:, :-1])[0][prediction]\n confidence = str(int(100*(round(confidence, 2))))+\"%\"\n return prediction, confidence\n","sub_path":"flask_app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"435108268","text":"# John found a briefcase with a 4-digit lock where the only digits available\n# are 0, 1, 2, 3, 4, and 5. Beside the lock, a hint is written that says\n# The third number is 3. 
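# A note on the brute force below: iterating repeat=4 and then overwriting
# the third digit prints 6**4 == 1296 lines, with each valid code appearing
# six times. Fixing position three and varying the other three positions
# yields the 6**3 == 216 distinct codes; a sketch:
import itertools

for a, b, d in itertools.product(range(6), repeat=3):
    print(f"{a}{b}3{d}")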
Help John open the briefcase by writing a python\n# program that prints all the possible combinations.\n\n\nimport itertools\n\nfor i in itertools.product([0, 1, 2, 3, 4, 5], repeat=4):\n arr = list(i)\n arr[2] = 3\n print(\"\".join(map(str, arr)))\n","sub_path":"Modules/itertools_module/dev_exercise/trizh_lock.py","file_name":"trizh_lock.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"306254632","text":"import logging\nfrom datetime import datetime\n\nfrom tornado.web import RequestHandler, HTTPError\n\nfrom octopus.handlers import BaseHandler\nfrom octopus.models import User, BingoGame, BingoTurn\nfrom octopus import models\n\nlogger = logging.getLogger(__name__)\n\n\nclass BingoGamesHandler(BaseHandler):\n def post(self):\n data = self._get_data()\n game = BingoGame(name=data['name'],\n user1=self.user)\n if 'metric' in data:\n game.user1_metric = data['metric']\n game.save()\n self.write(game.to_dict())\n\n def get(self):\n games = BingoGame.list()\n data = {'data': g.to_dict for g in games}\n self.write(data)\n\nclass BingoGameHandler(BaseHandler):\n def put(self, game_id):\n game = BingoGame.get(game_id)\n data = self._get_data()\n curr_user = self.user\n if curr_user == game.user1:\n game.user1_metric = data['metric']\n elif curr_user == game.user2:\n game.user2_metric = data['metric']\n elif not game.user2:\n game.user2 = self.user\n if 'metric' in data:\n game.user2_metric = data['metric']\n game.status = models.PREPARING\n else:\n raise HTTPError(400, 'game_full:%s' % game_id, reason='game_full')\n\n if game.user1_metric and game.user2_metric:\n # game start\n game.status = models.PLAYING\n turn = BingoTurn(game, 1, game.user1)\n turn.save()\n game.turns.append(turn)\n\n game.save()\n\n def get(self, game_id):\n game = BingoGame.get(game_id)\n game_dict = game.to_dict()\n game_dict['called_no'] = [t.called for t in game.turns if t.called]\n if game.status == models.FINISHED:\n game_dict['user1_metric'] = game.user1_metric\n game_dict['user2_metric'] = game.user2_metric\n self.write(game_dict)\n\n\nclass BingoTurnHandler(BaseHandler):\n def put(self, game_id, turn_no):\n turn_id = BingoTurn.turn_id(game_id, turn_no)\n turn = BingoTurn.get(turn_id)\n if self.user == turn.player:\n game = BingoGame.get(game_id)\n\n data = self._get_data()\n turn.call(data['call'])\n turn.save()\n\n called_no = [t.turn_no for t in game.turns]\n user1_won, user1_cond = game.won(game.user1_metric, called_no)\n user2_won, user2_cond = game.won(game.user1_metric, called_no)\n if any((user1_won, user2_won)):\n # XXX win the same time?\n if user1_won:\n game.winner = game.user1\n game.condition = user1_cond\n elif user2_won:\n game.winner = game.user2\n game.condition = user2_cond\n game.status = models.FINISHED\n game.end_time = datetime.utcnow()\n else:\n # next turn\n if turn.player == game.user1:\n next_user = game.user2\n else:\n next_user = game.user1\n next_turn = BingoTurn(game, turn.turn_no + 1, next_user)\n next_turn.save()\n game.turns.append(next_turn)\n game.save()\n else:\n raise HTTPError(400,\n 'not_your_turn:%s,%s' % (turn_id,self.user),\n reason='not_your_turn')\n\n def get(self, game_id, turn_no):\n turn_id = BingoTurn.turn_id(game_id, turn_no)\n turn = BingoTurn.get(turn_id)\n 
self.write(turn.to_dict())\n","sub_path":"octopus/handlers/bingo.py","file_name":"bingo.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"372062819","text":"import asyncio\nimport json\nimport sys\nfrom typing import Text, Dict, Optional, List, Any, Iterable, Tuple, Union\n\nimport rasa.utils.io as io_utils\nfrom rasa.constants import (\n DEFAULT_RESULTS_PATH,\n)\nimport rasa.cli.utils as cli_utils\nimport rasa.utils.common as utils\nfrom rasa.exceptions import ModelNotFound\n\n\ndef test_core(\n model: Optional[Text] = None,\n stories: Optional[Text] = None,\n endpoints: Optional[Text] = None,\n output: Text = DEFAULT_RESULTS_PATH\n):\n import rasa.core.utils as core_utils\n import rasa.model\n from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter\n from rasa.core.agent import Agent\n\n _endpoints = core_utils.AvailableEndpoints.read_endpoints(endpoints)\n\n additional_arguments = {'e2e': True}\n\n if output:\n io_utils.create_directory(output)\n\n try:\n unpacked_model = rasa.model.get_model(model)\n except ModelNotFound:\n cli_utils.print_error(\n \"Unable to test: could not find a model. Use 'rasa train' to train a \"\n \"Rasa model and provide it via the '--model' argument.\"\n )\n return\n\n core_path, nlu_path = rasa.model.get_model_subdirectories(unpacked_model)\n\n if not core_path:\n cli_utils.print_error(\n \"Unable to test: could not find a Core model. Use 'rasa train' to train a \"\n \"Rasa model and provide it via the '--model' argument.\"\n )\n\n use_e2e = additional_arguments.get(\"e2e\", False)\n\n _interpreter = RegexInterpreter()\n if nlu_path:\n _interpreter = NaturalLanguageInterpreter.create(_endpoints.nlu or nlu_path)\n elif use_e2e:\n cli_utils.print_warning(\n \"No NLU model found. 
Using default 'RegexInterpreter' for end-to-end \"\n \"evaluation.\"\n )\n\n _agent = Agent.load(unpacked_model, interpreter=_interpreter)\n\n from rasa.core.test import test\n\n kwargs = utils.minimal_kwargs(additional_arguments, test, [\"stories\", \"agent\"])\n\n _test_core(stories, _agent, output, **kwargs)\n\n\ndef _test_core(\n stories: Optional[Text], agent: \"Agent\", output_directory: Text, **kwargs: Any\n) -> None:\n from rasa.core.test import test\n\n loop = asyncio.get_event_loop()\n res = loop.run_until_complete(\n test(stories, agent, out_directory=output_directory, **kwargs)\n )\n\n print(res['report'])\n\n with open(output_directory + '/results.json', 'w') as outfile:\n json.dump(res, outfile, ensure_ascii=True, indent=4)\n\n\nconfig = sys.argv[1]\ntest_core('END2END_models/' + config, 'tests', None, 'END2END_results/' + config)\n","sub_path":"test_e2e.py","file_name":"test_e2e.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"287419261","text":"from apiclient.discovery import build\nfrom apiclient.errors import HttpError\nfrom oauth2client.tools import argparser\n\nfrom org.chula.courseville.model import Video\nfrom org.chula.courseville.utils.Constant import *\n\nyoutube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\n developerKey=DEVELOPER_KEY)\ndef youtubeSearchByKeyword(keyword):\n\n search_response = youtube.search().list(\n q=keyword,\n type=\"video\",\n part=\"id,snippet\",\n maxResults=MAX_RESULT\n ).execute()\n\n search_videos = []\n\n for search_result in search_response.get(\"items\", []):\n search_videos.append(search_result[\"id\"][\"videoId\"])\n video_ids = \",\".join(search_videos)\n\n\n return transformToVideoModel(video_ids)\n\ndef transformToVideoModel(video_ids):\n\n video_response = youtube.videos().list(\n id=video_ids,\n part='id, snippet, contentDetails,localizations,liveStreamingDetails, player,status,topicDetails,recordingDetails, statistics'\n ).execute()\n videos = []\n\n for video_result in video_response.get(\"items\", []):\n videos.append(Video(video_result[\"id\"],video_result[\"snippet\"], video_result[\"statistics\"]))\n # video_result[\"snippet\"][\"description\"]\n return videos","sub_path":"org/chula/courseville/connection/YoutubeApiConnection.py","file_name":"YoutubeApiConnection.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"287413625","text":"#!/usr/bin/python\nfrom __future__ import print_function\nimport sys\nimport libvirt\nfrom pprint import pprint as pp\nimport xml.etree.ElementTree as ET\n\n\ndef get_vm_hostnames():\n ns = {'url': 'http://openstack.org/xmlns/libvirt/nova/1.0'}\n conn = libvirt.open('qemu:///system')\n if conn == None:\n print('Failed to open connection to qemu:///system', file=sys.stderr)\n exit(1)\n\n for i in conn.listAllDomains():\n raw_xml = i.XMLDesc(0)\n root = ET.fromstring(raw_xml)\n for child in root.findall('metadata'):\n for c in child.findall('url:instance', ns):\n for cc in c:\n if cc.tag == '{%s}name' % ns['url']:\n print(cc.text)\n\n\nif __name__ == '__main__':\n get_vm_hostnames()\n","sub_path":"modules/libvirt/get-vm-hostnames.py","file_name":"get-vm-hostnames.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"645568770","text":"item_list = []\r\nitems = {}\r\n\r\ndef add():\r\n item_name = 
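# Why add() below appends items.copy() rather than items: the module keeps a
# single dict and mutates it on every call, so appending the dict itself
# would alias every list entry to the latest values. A self-contained
# illustration:
shared = {}
aliased, copied = [], []
for name in ("pen", "book"):
    shared["Name"] = name
    aliased.append(shared)        # all entries reference one dict
    copied.append(shared.copy())  # snapshot taken at append time
assert [d["Name"] for d in aliased] == ["book", "book"]
assert [d["Name"] for d in copied] == ["pen", "book"]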
input(\"Enter Item Name : \")\r\n item_price = input(\"Enter Item Price : \")\r\n item_desc = input(\"Enter Item Description : \")\r\n\r\n items[\"Name\"] = item_name\r\n items[\"Price\"] = item_price\r\n items[\"Description\"] = item_desc\r\n\r\n # item_list.append([item_name,item_price,item_desc])\r\n # print(item_list)\r\n # for item in item_list:\r\n # print(item)\r\n\r\n item_list.append(items.copy())\r\n\r\n read()\r\n\r\ndef read():\r\n counter = 0\r\n for item in item_list:\r\n counter += 1\r\n print(counter,item)\r\n\r\ndef update():\r\n to_update = int(input(\"Enter Product id to update : \"))\r\n print(\"You want to update \",item_list[to_update-1])\r\n update_choice = input(\"What do you want to update?? Name or Price : \")\r\n if update_choice == \"Name\":\r\n updated_Name = input(\"Enter Updated Name : \")\r\n new_data = item_list[to_update-1]\r\n new_data[\"Name\"] = updated_Name\r\n # print(new_data)\r\n print(\"Updated List\")\r\n read()\r\n elif update_choice == \"Price\":\r\n updated_Price = input(\"Enter Updated Price : \")\r\n new_data = item_list[to_update-1]\r\n new_data[\"Price\"] = updated_Price\r\n # print(new_data)\r\n print(\"Updated List\")\r\n read()\r\n else:\r\n print(\"Wrong Choice...\")\r\n\r\ndef delete():\r\n to_delete = int(input(\"Enter Item ID : \"))\r\n del(item_list[to_delete-1])\r\n\r\n read()\r\n\r\ndef search():\r\n pass\r\n\r\ndef sorting():\r\n pass\r\n\r\ndef save():\r\n file = open(\"Item_List.txt\",'a')\r\n print(\"Writing Data....\")\r\n # file.write(str(item_list))\r\n for data in item_list:\r\n file.write(str(data)+\"\\n\")\r\n print(\"Successfully written...\")\r\n # print(str(item_list))\r\n file.close()\r\n\r\ndef load():\r\n file = open(\"Item_List.txt\",'r')\r\n data = file.readlines()\r\n for i in data:\r\n item_list.append(i)\r\n #item_list.append(file.readlines())\r\n read()\r\n file.close()\r\n\r\ndef errHandler():\r\n print(\"Wrong Choice, Try Again....\")\r\n\r\nwhile True:\r\n print(\"\"\"\r\n 1. Add Item\r\n 2. Read Item\r\n 3. Update Item\r\n 4. Delete Item\r\n 5. Search Item\r\n 6. Sort Item\r\n 7. Save Item\r\n 8. Load Item\r\n 9. 
Exit\r\n \"\"\")\r\n\r\n todo = {\r\n \"1\" : add,\r\n \"2\" : read,\r\n \"3\" : update,\r\n \"4\" : delete,\r\n \"5\" : search,\r\n \"6\" : sorting,\r\n \"7\" : save,\r\n \"8\" : load,\r\n \"9\" : quit\r\n }\r\n\r\n user_choice = input(\"Enter your choice : \")\r\n\r\n todo.get(user_choice,errHandler)()\r\n","sub_path":"Applications/Item_CRUD/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"599412157","text":"from flask.ext.mail import Message\nfrom flask import render_template\nfrom app import app\n\napp.config['MAPS4ALL_MAIL_SUBJECT_PREFIX'] = '[Maps4All] '\napp.config['MAPS4ALL_MAIL_SENDER'] = 'Maps4All Admin '\n\nfrom app import mail\n \ndef send_email(to, subject, template, **kwargs):\n\t\"\"\"Send an email from the Maps4App administrator email address.\n\n\tKeyword arguments:\n to -- recipient's email address\n subject -- subject of the email\n template -- template with which to send the email\n\n\t\"\"\"\n\tmsg = Message(app.config['MAPS4ALL_MAIL_SUBJECT_PREFIX'] + subject,\n\t\tsender=app.config['MAPS4ALL_MAIL_SENDER'], recipients=[to])\n\tmsg.body = render_template(template + '.txt', **kwargs)\n\tmsg.html = render_template(template + '.html', **kwargs)\n\tmail.send(msg)\n\ndef send_create_account_email(user):\n\t\"\"\"Helper function to send a \"Create Account\" email to an invited user.\n\n\tKeyword arguments:\n user -- the InvitedUser to which to send the email\n\n\t\"\"\"\n\tsend_email(user.email, 'Create Your Account', \n\t\t\t 'admin/mail/create_account', user=user, token=user.token)\n\ndef send_confirm_account_email(user):\n\t\"\"\"Helper function to send a \"Confirm Account\" email to a registered user.\n\n\tKeyword arguments:\n user -- the User to which to send the email\n\n\t\"\"\"\n\tsend_email(user.email, 'Confirm Your Account', 'auth/mail/confirm', \n\t\t\t user=user, token=user.token)","sub_path":"app/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"181142591","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\n\n\nfrom decimal import *\nimport datetime\n\nfrom day.models.delivery_models import Delivery\nfrom day.models.payment_models import Payment, PayOut\nfrom day.models.item_models import NonPartneredItem, PartneredItem\nfrom day.models.models import PartneredBusiness, SimpleContact\nfrom day.forms import(\n\tStartPartneredDeliveryForm, \n\tPaymentForm,\n\tDeliveryNoteEditForm, \n\tDeliveryTransferForm,\n\tDeliveryPartneredAtPickupForm, \n\tSimpleContactRequireAddress,\n\tAtPartneredBusinessItemInfoForm,\n\tPayOutForm, \n\tBillingForm, \n\tPhoneNumberForm, \n\tNonPartneredItemForm,\n\tDeliveryOverrideDeliveryFeesModelForm,\n\tSearchForm,\n)\nfrom day.helpers import(\n\tTODAY, \n\tNOW, \n\tcreate_history, \n\tupdate_simple_contact,\n\tfind_contacts,\n\tget_redirect_to,\n\tclear_session_vars\n)\nfrom day.delivery_helpers import find_item_ready_time\n\n# Helpers\n\ndef delete_session_delivery_contacts_found(request):\n\ttry:\n\t\tdel request.session['delivery_contacts_found']\n\texcept KeyError:\n\t\tpass\n\n# Views\n@login_required()\ndef delivery_manager(request):\n\tclear_session_vars(request)\n\tdate = 
datetime.date.today()\n\tto_view = {\n\t\t'refresh': True,\n\t\t'date': date,}\n\tto_view.update(Delivery.find_deliveries(date, request.user))\n\treturn render(request, 'delivery_manager.html', to_view)\n\t\n@login_required\ndef delivery_add_partnered_item_info_at_pickup(request, delivery_id, item_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tnext_view = request.session['redirect_to']\n\tpartnered_item = delivery.partnered_items.get(pk=item_id)\n\tif partnered_item.from_partnered_business.pay_out_method in ['ta', 'op']:\n\t\t# if payout is tab based\n\t\tform = AtPartneredBusinessItemInfoForm(request.POST or None, initial={\n\t\t\t'delivery_fee': partnered_item.from_partnered_business.delivery_fee,\n\t\t\t'notes': delivery.notes,\n\t\t})\n\t\tif form.is_valid():\n\t\t\tdelivery.notes = request.POST.get('notes')\n\t\t\tpartnered_item.price = request.POST['customers_price']\n\t\t\tdelivery.save()\n\t\t\tpartnered_item.save()\n\t\t\treturn redirect(next_view)\n\t\treturn render(request, 'delivery_base.html', {\n\t\t\t'header_text': 'Add Item Info',\n\t\t\t'cancel': True,\n\t\t\t'form': form, \n\t\t\t'save_button_text': 'Save Item Info',\n\t\t\t'delivery_id': delivery.id, \n\t\t\t'next_view': next_view,\n\t\t\t'delivery': delivery,\n\t\t\t'delivery_summary': True,\n\t\t})\n\tif partnered_item.from_partnered_business.pay_out_method == 'ug':\n\t\t#if make payout if pay as you go\n\t\tif partnered_item.payout == None:\n\t\t\tform = PayOutForm(request.POST or None)\n\t\t\tif form.is_valid():\n\t\t\t\tpayout = PayOut.objects.create(\n\t\t\t\t\tpayment_type = request.POST['payment_type'],\n\t\t\t\t\tprice_paid_out = Decimal(request.POST['price_paid_out']),\n\t\t\t\t\tpaid_by = request.user,\n\t\t\t\t)\n\t\t\t\tpartnered_item.payout = payout\n\t\t\t\tpartnered_item.save()\n\t\t\t\tpartnered_item.calculate_price_after_percentage()\n\t\t\t\tdelivery.assign_delivery_fees()\n\t\t\t\treturn redirect(next_view)\n\t\t\treturn render(request, 'delivery_base.html', {\n\t\t\t\t'header_text': 'Pay out tab',\n\t\t\t\t'cancel': True,\n\t\t\t\t'form': form, \n\t\t\t\t'save_button_text': 'Save Item Pay Out Info',\n\t\t\t\t'delivery_id': delivery.id, \n\t\t\t\t'next_view': next_view,\n\t\t\t\t'delivery': delivery,\n\t\t\t\t'delivery_summary': True,\n\t\t\t})\n\tif partnered_item.from_partnered_business.pay_out_method == 'bi':\n\t\t# if payout is bill\n\t\tif not delivery.billed:\n\t\t\tform = BillingForm(request.POST or None, initial={\n\t\t\t\t'estimated_payment_date': timezone.now,\n\t\t\t\t'bill_to': partnered_item.from_partnered_business.contact,\n\t\t\t\t'price_billed': delivery.total_currently_due,})\n\t\t\tif form.is_valid():\n\t\t\t\tbill = form.save()\n\t\t\t\tdelivery.billing = bill\n\t\t\t\tdelivery.save()\n\t\t\t\treturn redirect(next_view)\n\t\t\treturn render(request, 'delivery_base.html', {\n\t\t\t\t'header_text': 'Bill partnered business',\n\t\t\t\t'cancel': True,\n\t\t\t\t'form': form, \n\t\t\t\t'save_button_text': 'Save bill info',\n\t\t\t\t'delivery_id': delivery.id, \n\t\t\t\t'next_view': next_view,\n\t\t\t\t'delivery': delivery,\n\t\t\t\t'delivery_summary': True,\n\t\t\t})\n\treturn render(request, 'delivery_base.html', {\n\t\t'header_text': 'Add partnered item',\n\t\t'cancel': True,\n\t\t'next_view': next_view,\n\t\t'delivery_summary': True,\n\t\t'delivery': delivery,\n\t})\n\t\n@login_required\ndef delivery_add_non_partnered_item_info_at_pickup(request, delivery_id, item_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\titem = 
delivery.non_partnered_items.get(pk=item_id)\n\tif item.payout == None:\n\t\tform = PayOutForm(request.POST or None)\n\t\tif form.is_valid():\n\t\t\tpayout = PayOut.objects.create(\n\t\t\t\tpayment_type = request.POST['payment_type'],\n\t\t\t\tprice_paid_out = Decimal(request.POST['price_paid_out']),\n\t\t\t\tpaid_by = request.user,\n\t\t\t)\n\t\t\titem.payout = payout\n\t\t\titem.time_out_for_delivery = timezone.now()\n\t\t\titem.save()\n\t\t\tdelivery.save()\n\t\t\titem.calculate_price_after_percentage()\n\t\t\treturn redirect('delivery_continue', delivery.id)\n\t\treturn render(request, 'delivery_base.html', {\n\t\t\t'header_text': 'Pay for non-partnered item %s' % item,\n\t\t\t'cancel': True,\n\t\t\t'form': form, \n\t\t\t'save_button_text': 'Save pay out info',\n\t\t\t'delivery_id': delivery.id, \n\t\t\t'delivery': delivery,\n\t\t\t'delivery_summary': True,\n\t\t})\n\treturn render(request, 'delivery_base.html', {\n\t\t'header_text': 'Add non-partnered item',\n\t\t'cancel': True,\n\t\t'delivery_summary': True,\n\t\t'delivery': delivery,\n\t})\n\t\n@login_required\ndef delivery_add_destination(request, delivery_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tnext_view = request.session['redirect_to']\n\tform = SimpleContactRequireAddress(request.POST or None, initial={\n\t\t\t'address': delivery.temp_destination_address,\n\t\t\t'phone_number': request.session.get('temp_phone_number')\n\t})\n\ttry:\n\t\tdel request.session['temp_phone_number']\n\texcept KeyError:\n\t\tpass\n\tif form.is_valid():\n\t\tcontact = form.save()\n\t\tdelivery.temp_destination_address = None\n\t\tdelivery.confirmed_destination = contact\n\t\tdelivery.save()\n\t\tdelete_session_delivery_contacts_found(request)\n\t\treturn redirect(next_view)\n\treturn render(request, 'delivery_base.html', {\n\t\t'header_text': 'Add Destination Info',\n\t\t'cancel': True, \n\t\t'next_view': next_view,\n\t\t'form': form, \n\t\t'save_button_text': 'Save Destination Info',\n\t\t'delivery': delivery,\n\t\t'delivery_summary': True\n\t})\n\t\n@login_required\ndef delivery_confirm_destination_match(request, delivery_id, contact_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tnext_view = request.session['redirect_to']\n\tcontact = get_object_or_404(SimpleContact, pk=contact_id)\n\tform = SimpleContactRequireAddress(request.POST or None, instance=contact)\n\tif form.is_valid():\n\t\tcontact = form.save()\n\t\tdelivery.temp_destination_address = None\n\t\tdelivery.confirmed_destination = contact\n\t\tdelivery.save()\n\t\tdelete_session_delivery_contacts_found(request)\n\t\treturn redirect(next_view)\n\treturn render(request, 'delivery_contacts_matching.html', {\n\t\t'header_text': 'Confirm destination match',\n\t\t'cancel': True, \n\t\t'message': 'Matching contact found',\n\t\t'form': form, \n\t\t'save_button_text': 'Save Destination Info',\n\t\t'delivery_id': delivery.id, \n\t\t'next_view': next_view,\n\t})\n\t\t\n@login_required\ndef delivery_confirm_destination(request, delivery_id, search_by='address'):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tnext_view = request.session['redirect_to']\n\tif search_by=='address':\n\t\tcontacts = SimpleContact.find_contacts(address=delivery.temp_destination_address)\n\telif search_by=='phone_number':\n\t\t# fixed typo ('serch_by') and undefined name; reuse the session key used by delivery_add_destination\n\t\tcontacts = SimpleContact.find_contacts(phone_number=request.session.get('temp_phone_number'))\n\trequest.session['delivery_contacts_found'] = len(contacts['contacts'])\n\tif request.session.get('delivery_contacts_found') == 
0:\n\t\tdelete_session_delivery_contacts_found(request)\n\t\treturn redirect('delivery_add_destination', delivery_id=delivery.id)\n\tif request.session.get('delivery_contacts_found') == 1:\n\t\tdelete_session_delivery_contacts_found(request)\n\t\treturn redirect('delivery_confirm_destination_match', \n\t\t\tdelivery_id=delivery.id, next_view=next_view, contact_id=contacts['contacts'][0].id)\n\tif request.session.get('delivery_contacts_found') > 1:\n\t\tdelete_session_delivery_contacts_found(request)\n\t\treturn render(request, 'delivery_contact_many_matches.html', {\n\t\t\t'header_text': 'Many contacts found', \n\t\t\t'contacts': contacts['contacts'],\n\t\t\t'cancel': True, \n\t\t\t'delivery_id': delivery.id, \n\t\t\t'next_view': next_view,\n\t\t})\n\treturn render(request, 'delivery_base.html', {\n\t\t'header_text': 'Confirm destination',\n\t\t'cancel': True,\n\t\t'next_view': next_view,\n\t\t'delivery_summary': True\n\t})\n\t\n@login_required\ndef delivery_mark_partnered_item_picked_up(request, delivery_id, partnered_item_id):\n\titem = get_object_or_404(PartneredItem, pk=partnered_item_id)\n\tnext_view = request.session['redirect_to']\n\titem.time_out_for_delivery = timezone.now()\n\titem.save()\n\treturn redirect(next_view)\n\n@login_required\ndef delivery_mark_non_partnered_item_picked_up(request, delivery_id, non_partnered_item_id):\n\titem = get_object_or_404(NonPartneredItem, pk=non_partnered_item_id)\n\tnext_view = request.session['redirect_to']\n\titem.time_out_for_delivery = timezone.now()\n\titem.save()\n\treturn redirect(next_view)\n\n@login_required\t\ndef delivery_at_pickup(request, delivery_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\trequest.session['redirect_to'] = reverse('delivery_continue', args=(delivery.id,))\n\theader_text = '%s Delivery at Pickup' % delivery.delivery_type\n\t\n\thead_out = False\n\tconfirm_contact = False\n\t\n\tif delivery.confirmed_destination == None:\n\t\tconfirm_contact = True\n\t\trequest.session['sc_guess_with'] = delivery.temp_destination_address\n\telif delivery.ready_for_delivery and \\\n\t\tdelivery.confirmed_destination != None and \\\n\t\tdelivery.delivery_type == 'Non-Partnered':\n\t\treturn redirect('delivery_out_for_delivery', delivery_id=delivery.id)\n\t\t\n\tnot_ready_partnered_items = []\n\tnot_ready_non_partnered_items = []\n\tpartnered_item_mark_as_picked_up = []\n\tfor part in delivery.partnered_items.iterator():\n\t\tif part.ready_for_delivery == False:\n\t\t\tnot_ready_partnered_items.append(part)\n\t\telif part.picked_up == False:\n\t\t\tpartnered_item_mark_as_picked_up.append(part)\n\tfor non in delivery.non_partnered_items.iterator():\n\t\tif non.ready_for_delivery == False:\n\t\t\tnot_ready_non_partnered_items.append(non)\n\t\t\n\treturn render(request, 'delivery_at_pickup.html', {\n\t\t'header_text': header_text,\n\t\t'cancel': True, \n\t\t'confirm_contact': confirm_contact, \n\t\t'next_view': request.session['redirect_to'], \n\t\t'delivery': delivery,\n\t\t'delivery_summary': True,\n\t\t'not_ready_partnered_items': not_ready_partnered_items,\n\t\t'not_ready_non_partnered_items': not_ready_non_partnered_items,\n\t\t'partnered_item_mark_as_picked_up': partnered_item_mark_as_picked_up,\n\t})\n'''\n@login_required\ndef delivery_partnered_at_pickup(request, delivery_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\trequest.session['redirect_to'] = reverse('delivery_continue', args=(delivery.id,))\n\tpartnered_items = delivery.partnered_items.all()\n\thead_out = False\n\tconfirm_contact = 
False\n\tif delivery.confirmed_destination == None:\n\t\tconfirm_contact = True\n\tif delivery.ready_for_delivery and delivery.confirmed_destination != None:\n\t\thead_out = True\n\treturn render(request, 'delivery_at_pickup.html', {\n\t\t'header_text': 'Partnered Delivery at Pickup',\n\t\t'cancel': True, 'head_out': head_out, \n\t\t'confirm_contact': confirm_contact, \n\t\t'add_item': not delivery.ready_for_delivery,\n\t\t'next_view': reverse('delivery_partnered_at_pickup', args=(delivery.id,)), \n\t\t'delivery_id': delivery_id,\n\t\t'delivery': delivery,\n\t\t'delivery_summary': True,\n\t})\n\n@login_required\ndef delivery_non_partnered_at_pickup(request, delivery_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tnon_partnered_items = delivery.non_partnered_items.all()\n\thead_out = False\n\tconfirm_contact = False\n\tif delivery.confirmed_destination == None:\n\t\tconfirm_contact = True\n\tif delivery.ready_for_delivery and delivery.confirmed_destination != None:\n\t\treturn redirect('delivery_out_for_delivery', delivery_id=delivery.id)\n\treturn render(request, 'delivery_at_pickup.html', {\n\t\t'header_text': 'Non-Partnered Delivery at Pickup',\n\t\t'cancel': True, \n\t\t'head_out': head_out, \n\t\t'confirm_contact': confirm_contact, \n\t\t'add_item': not delivery.ready_for_delivery,\n\t\t'next_view': reverse('delivery_non_partnered_at_pickup', args=(delivery.id,)), \n\t\t'delivery_id': delivery_id,\n\t\t'delivery': delivery,\n\t\t'delivery_summary': True,\n\t})\n'''\n\n@login_required\ndef delivery_start_non_partnered(request):\n\tbutton = 'Search'\n\tform = SearchForm(request.POST or None)\n\t# if form is valid search SimpleContacts for phone number match\n\tif form.is_valid():\n\t\tdelivery = Delivery.objects.create()\n\t\trequest.session['redirect_to'] = reverse('delivery_continue', args=(delivery.id,))\n\t\trequest.session['starting_non_partered_delivery'] = delivery.id\n\t\trequest.session['sc_guess_with'] = guess_with=request.POST['search']\n\t\treturn redirect('simple_contact_guess',)\n\n\treturn render(request, 'delivery_base.html', {\n\t\t'header_text': 'Start a non-partnered delivery',\n\t\t'cancel': True,\n\t\t'next_view': reverse('delivery_manager'),\n\t\t'url': reverse('delivery_start_non_partnered'),\n\t\t'form': form,\n\t\t'save_button_text': 'Search for destination contact',\n\t})\n\t\n@login_required\ndef delivery_start_partnered(request):\n\tform = StartPartneredDeliveryForm(request.POST or None, initial={\n\t\t'order_start_time': NOW})\n\tif form.is_valid():\n\t\tdelivery = Delivery()\n\t\tdelivery.order_start_time = timezone.now()\n\t\tdelivery.temp_destination_address = request.POST['destination_address']\n\t\tdelivery.notes = request.POST.get('notes')\n\t\tpartner = PartneredBusiness.objects.get(pk=request.POST['partnered_business'])\n\t\tdate = datetime.date.today()\n\t\tif request.POST['order_will_be_ready_in'] == 'enter':\n\t\t\ttime = datetime.datetime.strptime(\n\t\t\t\trequest.POST.get('specific_time_order_ready'), '%I:%M %p'\n\t\t\t).time()\n\t\t\tprint('time: %s' % time)\n\t\t\tdate = datetime.date.today()\n\t\t\titem_ready_time = datetime.datetime.combine(date, time)\n\t\telse:\n\t\t\tminutes = datetime.timedelta(minutes=int(request.POST['order_will_be_ready_in']))\n\t\t\titem_ready_time = datetime.datetime.now() + minutes\n\t\tpartnered_item = PartneredItem.objects.create(\n\t\t\tfrom_partnered_business = partner, \n\t\t\titem_ready_time = 
item_ready_time,\n\t\t)\n\t\tdelivery.save()\n\t\tdelivery.partnered_items.add(partnered_item)\n\t\tdelivery.assign_delivery_fees()\n\t\treturn redirect('delivery_manager')\n\treturn render(request, 'delivery_base.html', {\n\t\t'header_text': 'Start a partnered delivery',\n\t\t'cancel': True, \n\t\t'url': reverse('delivery_start_partnered'),\n\t\t'form': form,\n\t\t'save_button_text': 'Start this delivery',\n\t})\n\t\n@login_required\ndef delivery_at_destination(request, delivery_id):\n\ttime = timezone.now()\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tfor non in delivery.non_partnered_items.iterator():\n\t\tnon.time_delivered = time\n\t\tnon.save()\n\tfor part in delivery.partnered_items.iterator():\n\t\tpart.time_delivered = time\n\t\tpart.save()\n\tdelivery.time_delivered = time\n\tdelivery.save()\n\treturn redirect('delivery_continue', delivery_id)\n\n@login_required\ndef delivery_out_for_delivery(request, delivery_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tmessage = \"Delivery from %s to %s\" % (delivery.delivery_from, delivery.destination)\n\treturn render(request, 'delivery_out_for_delivery.html', {\n\t\t'header_text': 'Out for delivery',\n\t\t'delivery': delivery,\n\t\t'message': message,\n\t\t'delivery_summary': True,\n\t})\n\t\n@login_required\ndef delivery_continue(request, delivery_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tif 'simple_contact_found' in request.session:\n\t\tdelivery.confirmed_destination = SimpleContact.objects.get(pk=request.session.pop('simple_contact_found'))\n\t\tdelivery.save()\n\t\tif 'starting_non_partered_delivery' in request.session:\n\t\t\tdel request.session['starting_non_partered_delivery']\n\t\t\treturn redirect('non_partnered_item_add', delivery.id)\n\tif delivery.status == 'Unclaimed':\n\t\tif delivery.delivery_type == None:\n\t\t\treturn redirect('non_partnered_item_add', delivery.id)\n\t\treturn redirect('delivery_manager')\n\telif delivery.status == 'Claimed' or 'Picked up' in delivery.status \\\n\tor delivery.confirmed_destination == None:\n\t\treturn redirect('delivery_at_pickup', delivery.id)\n\telif delivery.status == 'Out for delivery':\n\t\treturn redirect('delivery_out_for_delivery', delivery.id)\n\telif delivery.status == 'Delivered':\n\t\treturn redirect('payment_add', delivery.id)\n\treturn redirect('delivery_manager')\n\n@login_required\ndef delivery_claim(request, delivery_id, cont=False):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tdelivery.delivery_claimed_by = request.user\n\tdelivery.save()\n\tif cont == False:\n\t\treturn redirect('delivery_manager')\n\telse:\n\t\treturn redirect('delivery_continue', delivery_id)\n\t\n@login_required\ndef delivery_edit_notes(request, delivery_id, redirect_to='delivery_manager'):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tnext_view = get_redirect_to(request, redirect_to)\n\tform = DeliveryNoteEditForm(request.POST or None, instance=delivery)\n\tif form.is_valid():\n\t\tform.save()\n\t\treturn redirect(next_view)\n\treturn render(request, 'delivery_base.html', {\n\t'header_text': 'Edit delivery notes',\n\t'cancel': True,\n\t'next_view': next_view,\n\t'form': form, \n\t'save_button_text': 'Save note changes',\n\t'next_view': next_view,\n\t'delivery': delivery,\n\t'delivery_summary': True,\n})\n\n@login_required\ndef delivery_override_delivery_fees(request, delivery_id, redirect_to='delivery_manager'):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tnext_view = get_redirect_to(request, 
redirect_to)\n\tform = DeliveryOverrideDeliveryFeesModelForm(request.POST or None, instance=delivery)\n\tif form.is_valid():\n\t\tform.save()\n\t\tdelivery.assign_delivery_fees()\n\t\treturn redirect(next_view)\n\treturn render(request, 'delivery_base.html', {\n\t'header_text': 'Override delivery fees',\n\t'cancel': True,\n\t'next_view': next_view,\n\t'form': form, \n\t'save_button_text': 'Save delivery fee changes',\n\t'delivery': delivery,\n\t'delivery_summary': True,\n})\n\t\n@login_required\ndef delivery_transfer(request, delivery_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tform = DeliveryTransferForm(request.POST or None)\n\tif form.is_valid():\n\t\tif request.POST['transfer_delivery_to'] == '-1':\n\t\t\tuser = None\n\t\telse:\n\t\t\tuser = User.objects.get(pk=request.POST['transfer_delivery_to'])\n\t\tdelivery.delivery_claimed_by=user\n\t\tdelivery.save()\n\t\treturn redirect('delivery_manager')\n\treturn render(request, 'delivery_base.html', {\n\t\t'header_text': 'Transfer delivery',\n\t\t'cancel': True, \n\t\t'url': reverse('delivery_transfer', args=(delivery.id,)),\n\t\t'delivery': delivery,\n\t\t'delivery_summary': True,\n\t\t'form': form,\n\t\t'save_button_text': 'Confirm transfer',\n\t})\n\n@login_required\ndef delivery_view(request, delivery_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\trequest.session['redirect_to'] = reverse('delivery_view', args=(delivery.id,))\n\treturn render(request, 'delivery_view.html', {\n\t\t'header_text': 'Edit/View Delivery',\n\t\t'delivery': delivery,\n\t})\n\t\n@login_required\ndef delivery_delete(request, delivery_id):\n\tdelivery = get_object_or_404(Delivery, pk=delivery_id)\n\tif request.method == \"POST\":\n\t\tdelivery.delete()\n\t\treturn redirect('delivery_manager')\n\treturn render(request, 'delivery_confirm_delete.html', {\n\t\t\t'header_text': 'Delete delivery',\n\t\t\t'cancel': True,\n\t\t\t'object': delivery,\n\t\t})\n","sub_path":"day/views/delivery_views.py","file_name":"delivery_views.py","file_ext":"py","file_size_in_byte":19466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"409859799","text":"from flask import Flask, request\nfrom flask_cors import CORS\n\nimport base64\nimport requests\nimport util\nimport json\nimport os\n\napp = Flask(__name__)\nCORS(app)\n\nhbase_base_url = os.environ.get('HBASE_BASE_URL','http://localhost:4200')\napp_port = int(os.environ.get('APP_PORT',3004))\n\n@app.route(\"/ping\", methods=['GET'])\ndef ping():\n    return \"pong\"\n\n@app.route(\"/<tablename>/<row>\", methods=['GET'])\ndef table_rows(tablename, row):\n    response = requests.get(hbase_base_url + request.full_path, headers={\"Accept\" : \"application/json\"})\n    \n    rows = list()\n    if not util.is_successful(response):\n        print(\"not successful\\n\")\n        return json.dumps(rows)\n    \n    response_text = json.loads(response.text)\n    for row in response_text['Row']: \n        row_key = base64.b64decode(row['key'])\n        dataset = dict()\n        dataset[\"key\"]=row_key\n        for cell in row['Cell']:\n            columnname = base64.b64decode(cell['column'])\n            value = cell['$']\n            if value == None:\n                continue \n            dataset[columnname]=base64.b64decode(value)\n        rows.append(dataset)\n    \n    return json.dumps(rows) \n\n\nif __name__ == '__main__':\n    app.run(debug=True,host='0.0.0.0',port=app_port)","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"373856433","text":"# 计算旅行者路径的长度\r\n# 起点0\r\n# 走了3步,result = 3\r\n# 又走了5步,result = 8\r\n# 又走了6步,result = 14\r\n\r\norigin = 0\r\n\r\ndef go(step):\r\n global origin\r\n new_pos = origin + step\r\n origin = new_pos\r\n return origin\r\n\r\nprint(go(3))\r\nprint(go(5))\r\nprint(go(6))\r\n\r\n\r\n\r\n","sub_path":"python/eleven/c13.py","file_name":"c13.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"81725784","text":"import argparse\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport pytorch_lightning as pl\nfrom sklearn.metrics import f1_score, matthews_corrcoef\n\nfrom .structmodel import GAT\n\nclass GatLit(GAT, pl.LightningModule):\n def __init__(self, hparams):\n if not hasattr(hparams, '__dict__'):\n hparams = argparse.Namespace(**hparams)\n super().__init__(**vars(hparams))\n self.hparams = hparams\n \n\n \n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = FocalLoss2(size_average=self.hparams.loss_size_avarage)(y_hat, y)\n #loss = torch.nn.CrossEntropyLoss()(y_hat, y)\n progress_bar = {'lr' : self.hparams.learning_rate}\n return {\"loss\" : loss, 'progress_bar' : progress_bar }\n \n def configure_optimizers(self):\n \n optimizer = torch.optim.Adam(self.parameters(),\n lr=self.hparams.learning_rate,\n weight_decay=self.hparams.reg_term,\n amsgrad=self.hparams.use_amsgrad)\n scheduler = { 'scheduler' : ReduceLROnPlateau(optimizer, 'max',\n patience=self.hparams.plateau_patience,\n factor=0.2, verbose =True),\n\n 'monitor' : 'f1_score',\n 'interval': 'epoch',\n 'frequency' : 1,\n 'name' : 'LRScheduler'\n }\n return [optimizer], [scheduler]\n \n def validation_step(self, batch, batch_idx):\n \n x, y = batch\n y_hat = self(x)\n loss = FocalLoss2(size_average=self.hparams.loss_size_avarage)(y_hat, y)\n #loss = torch.nn.CrossEntropyLoss()(y_hat, y)\n\n return {'val_loss' : loss, 'y_hat' : y_hat, 'y_true' : y}\n \n def validation_epoch_end(self, outputs):\n val_set_loss = [x['val_loss'] for x in outputs]\n avg_loss = torch.stack(val_set_loss).mean()\n y_hat = [x['y_hat'].view(-1, self.hparams.n_classes) for x in outputs]\n y = [x['y_true'] for x in outputs]\n \n y_hat = torch.cat(y_hat, dim=0)\n y = torch.cat(y).view(-1)\n \n y_hat = y_hat.cpu().detach().numpy()\n assert not (y_hat > 1).any(), y_hat\n y_hat = y_hat.argmax(1)\n y = y.cpu().detach().numpy()\n f1 = f1_score(y, y_hat, average=self.hparams.f1_type)\n f1 = f1.mean()\n f1_loss = f1/(avg_loss + 1e-3)\n \n #matt = matthews_corrcoef(y, y_hat)\n #print(f'{self.hparams.f1_type} val f1 {f1:.3f} f1_loss {f1_loss:.3f} loss {avg_loss:.3f} lr {self.hparams.learning_rate:.5f}')\n #self.log('val_loss', avg_loss)\n progress_bar = {f'f1_{self.hparams.f1_type}' : f1, 'val_loss' : avg_loss}\n return {'val_loss' : avg_loss, 'f1_score' : f1, 'f1_loss' : f1_loss, 'progress_bar' : progress_bar}\n \n def test_step(self, batch, batch_idx):\n return self.validation_step(batch, batch_idx)\n \n def test_epoch_end(self, outputs):\n logs = self.validation_epoch_end(outputs)\n logs_ = dict(test_f1 = logs['f1_score'],\n test_loss = logs['val_loss'].item(),\n test_matt = logs['matt'])\n return logs_\n \n\n \n \n\nclass FocalLoss2(nn.Module):\n def __init__(self, gamma=0, alpha=None, size_average=True):\n super(FocalLoss2, self).__init__()\n self.gamma = gamma\n self.alpha = alpha\n if isinstance(alpha,(float,int)): self.alpha = 
torch.Tensor([alpha,1-alpha])\n if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)\n self.size_average = size_average\n\n def forward(self, input, target):\n if input.dim()>2:\n input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W\n input = input.transpose(1,2) # N,C,H*W => N,H*W,C\n input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C\n target = target.view(-1,1)\n\n logpt = F.log_softmax(input)\n logpt = logpt.gather(1,target)\n logpt = logpt.view(-1)\n pt = Variable(logpt.data.exp())\n\n if self.alpha is not None:\n if self.alpha.type()!=input.data.type():\n self.alpha = self.alpha.type_as(input.data)\n at = self.alpha.gather(0,target.data.view(-1))\n logpt = logpt * Variable(at)\n\n loss = -1 * (1-pt)**self.gamma * logpt\n if self.size_average: return loss.mean()\n else: return loss.sum()","sub_path":"rossmann_toolbox/models/structwrapper.py","file_name":"structwrapper.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"146578327","text":"import tensorflow as tf\n\n\ndef _create_conv2_block(model, net, filters, layer_number):\n net = tf.layers.conv2d(inputs=net, filters=filters, kernel_size=(3, 3), strides=(1, 1), name=\"conv%d_1\" % layer_number,\n activation=tf.nn.relu, padding=\"same\")\n model[\"vgg16/conv%d_1\" % layer_number] = net\n net = tf.layers.conv2d(inputs=net, filters=filters, kernel_size=(3, 3), strides=(1, 1), name=\"conv%d_2\" % layer_number,\n activation=tf.nn.relu, padding=\"same\")\n model[\"vgg16/conv%d_2\" % layer_number] = net\n net = tf.layers.max_pooling2d(inputs=net, pool_size=(2, 2), strides=(2, 2), padding=\"same\", name=\"pool%d\" % layer_number)\n model[\"vgg16/pool%d\" % layer_number] = net\n return net\n\n\ndef _create_conv3_block(model, net, filters, layer_number):\n net = tf.layers.conv2d(inputs=net, filters=filters, kernel_size=(3, 3), strides=(1, 1), name=\"conv%d_1\" % layer_number,\n activation=tf.nn.relu, padding=\"same\")\n model[\"vgg16/conv%d_1\" % layer_number] = net\n net = tf.layers.conv2d(inputs=net, filters=filters, kernel_size=(3, 3), strides=(1, 1), name=\"conv%d_2\" % layer_number,\n activation=tf.nn.relu, padding=\"same\")\n model[\"vgg16/conv%d_2\" % layer_number] = net\n net = tf.layers.conv2d(inputs=net, filters=filters, kernel_size=(3, 3), strides=(1, 1), name=\"conv%d_3\" % layer_number,\n activation=tf.nn.relu, padding=\"same\")\n model[\"vgg16/conv%d_3\" % layer_number] = net\n net = tf.layers.max_pooling2d(inputs=net, pool_size=(2, 2), strides=(2, 2), padding=\"same\", name=\"pool%d\" % layer_number)\n model[\"vgg16/pool%d\" % layer_number] = net\n return net\n\n\ndef create_model(input_tensor, mode, hyper_params):\n \"\"\"\n A full reference model of vgg16 without pretrained weights.\n\n This uses the layers api and is optimized for tensorflow.\n :param input_tensor: The input tensor dict containing a \"image\" rgb tensor.\n :param mode: Execution mode as a tf.estimator.ModeKeys\n :param hyper_params: The hyper param file. 
\"vgg16\" : {\"encoder_only\": Boolean}\n :return: A dictionary containing all output tensors.\n \"\"\"\n model = {}\n with tf.variable_scope('vgg16') as scope:\n net = tf.cast(input_tensor[\"image\"], dtype=tf.float32, name=\"input/cast\")\n model[\"image\"] = net\n mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')\n net = net - mean\n model[\"image-normalized\"] = net\n\n net = _create_conv2_block(model, net, filters=64, layer_number=1)\n net = _create_conv2_block(model, net, filters=128, layer_number=2)\n net = _create_conv3_block(model, net, filters=256, layer_number=3)\n net = _create_conv3_block(model, net, filters=512, layer_number=4)\n net = _create_conv3_block(model, net, filters=512, layer_number=5)\n print(net.get_shape())\n\n if not hyper_params.vgg16.encoder_only:\n net = tf.layers.conv2d(inputs=net, filters=4096, kernel_size=(7, 7), strides=(1, 1), name=\"fc1\", activation=tf.nn.relu)\n model[\"vgg16/fc1\"] = net\n net = tf.layers.conv2d(inputs=net, filters=4096, kernel_size=(1, 1), strides=(1, 1), name=\"fc2\", activation=tf.nn.relu)\n model[\"vgg16/fc2\"] = net\n net = tf.layers.conv2d(inputs=net, filters=1000, kernel_size=(1, 1), strides=(1, 1), name=\"logits\", activation=None)\n model[\"logits\"] = net\n net = tf.nn.softmax(net)\n model[\"probs\"] = net\n return model\n","sub_path":"starttf/models/vgg16.py","file_name":"vgg16.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"22945003","text":"from base_handler import *\nfrom models.Article import *\nimport datetime\nimport logging\nfrom google.appengine.api import users\nimport re\n\narticles_per_page = 10\n\nclass BlogPage(BaseHandler):\n\tdef get(self):\n\t\t# Blog page displays a list of blogs\n\t\t# limits the number of articles to 10\n\t\tparams = self.get_params_dict(['offset', 'limit'])\n\t\ttry:\n\t\t\tparams['offset'] = int(params['offset'])\n\t\texcept ValueError:\n\t\t\tparams['offset'] = 0\n\t\tarticles = Article.get_eq(offset=params['offset'], limit = articles_per_page)\n\t\tfor article in articles:\n\t\t\t# strip html tags and limit the article size to show as a list\n\t\t\tstripped_content = strip_tags(article.content)\n\t\t\tif len(stripped_content) > 250:\n\t\t\t\tarticle._content = stripped_content[:200] + '...'\n\t\t\telse:\n\t\t\t\tarticle._content = stripped_content\n\t\t\tarticle._id = article.key()\n\t\t\tarticle._created = article.created.strftime('%A, %B %d, %Y')\n\n\t\t# calculate previous and next page's offsets to create links\n\t\tprevious_offset = params['offset'] - articles_per_page\n\t\tnext_offset = params['offset'] + articles_per_page\n\t\tif len(articles) < articles_per_page: next_offset = None\n\t\tif previous_offset < 0: previous_offset = None\n\t\tself.render('blog.html', {\n\t\t\t'pageTitle': 'BLOG',\n\t\t\t'articles': articles,\n\t\t\t'previous_offset': previous_offset,\n\t\t\t'next_offset': next_offset\n\t\t\t})\n\nclass BlogArticlePage(BaseHandler):\n\tdef get(self, article_id):\n\t\t# Displays full article.\n\t\tarticle = Article.get(article_id)\n\t\tif not article:\n\t\t\tself.render('404.html')\n\t\t\treturn\n\t\tarticle._id = article.key().id()\n\t\tarticle._created = article.created.strftime('%A, %B %d, %Y')\n\t\tself.render('blogArticle.html', {\n\t\t\t'pageTitle': 'BLOG',\n\t\t\t'subTitle': article.title,\n\t\t\t'article': article\n\t\t\t})\n\ndef minifyTags(raw_tags):\n\t# check for redundant tags and white spaces.\n\ttags 
= []\n\tfor raw_tag in raw_tags:\n\t\traw_tag = raw_tag.strip().lower()\n\t\tif raw_tag != \"\" and raw_tag not in tags:\n\t\t\ttags.append(raw_tag)\n\treturn tags\n\nclass BlogAdminPage(BaseHandler):\n\t# Admin Page for blog\n\tdef get(self):\n\t\tuser = users.get_current_user()\n\t\tif user:\n\t\t\tif users.is_current_user_admin():\n\t\t\t\tself.render('blogAdmin.html', {\n\t\t\t\t\t'pageTitle': 'BLOG ADMIN',\n\t\t\t\t\t'logoutUrl': users.create_logout_url('/blog')\n\t\t\t\t\t})\n\t\t\t\treturn\n\t\tself.write(('<a href=\"%s\">Sign in or register.</a>' % users.create_login_url('/blog/admin')))\n\n\tdef post(self):\n\t\tuser = users.get_current_user()\n\t\tif user and users.is_current_user_admin():\n\t\t\tparams = self.get_params_dict(['title', 'pubDate', 'content', 'tags', 'image'])\n\n\t\t\ttitle_for_tags = re.sub(r'[^\\w]', ' ', params['title'])\n\t\t\traw_tags = params['tags'].split(',')\n\t\t\ttags = minifyTags(raw_tags)\n\n\t\t\tarticle = Article(\n\t\t\t\ttitle = force_unicode(params['title']),\n\t\t\t\tcontent = force_unicode(params['content']),\n\t\t\t\ttags = tags,\n\t\t\t\timage = params['image']\n\t\t\t\t)\n\t\t\tarticle.store()\n\n\t\t\t# Tags are stored in article as provided by the publisher.\n\t\t\t# However, when actual mapping is created, the title is \n\t\t\t# taken into account.\n\t\t\ttags = minifyTags(tags + title_for_tags.split(' '))\n\t\t\tfor tag in tags:\n\t\t\t\tarticle_tag = ArticleTag.get(tag)\n\t\t\t\tif not article_tag:\n\t\t\t\t\tarticle_tag = ArticleTag(\n\t\t\t\t\t\tkey_name = tag,\n\t\t\t\t\t\tarticles = [article.key()]\n\t\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tarticle_tag.articles.append(article.key())\n\t\t\t\tarticle_tag.store()\n\n\t\t\tself.render('blogAdmin.html', {\n\t\t\t\t'pageTitle': 'BLOG ADMIN',\n\t\t\t\t'logoutUrl': users.create_logout_url('/blog'),\n\t\t\t\t'message': 'success'\n\t\t\t\t})\n\t\telse:\n\t\t\tself.write(\"Unauthorized.\")\n","sub_path":"controllers/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"619919332","text":"# Copyright 2019-2021 ETH Zurich and the DaCe authors. 
All rights reserved.\nimport dace\nimport numpy as np\nimport pytest\nimport cupy as cp\nfrom dace.dtypes import ScheduleType, StorageType\nfrom dace.sdfg import nodes, SDFG, SDFGState\nfrom dace.data import Scalar\nfrom dace.transformation.dataflow import RedundantArray, RedundantSecondArray, RedundantArrayCopying\n\nimport dace.libraries.blas\n\n# Define symbolic sizes for arbitrary inputs\nM = dace.symbol('M')\nK = dace.symbol('K')\nN = dace.symbol('N')\nL = dace.symbol('L')\nO = dace.symbol('O')\n\n# Define data type to use\ndtype = dace.float64\nnp_dtype = np.float64\n\n#####################################################################\n\n\n@dace.program\ndef matmul_lib(A: dtype[M, K], B: dtype[K, N]):\n    return A @ B\n\n\n@dace.program\ndef three_matmul(A: dtype[M, K], B: dtype[K, N], C: dtype[N, L], D: dtype[L,\n                                                                          O]):\n    M1 = matmul_lib(A, B)\n    M2 = matmul_lib(C, D)\n    return matmul_lib(M1, M2)\n\n\n@dace.program\ndef three_matmul_debug(A: dtype[M, K], B: dtype[K, N], C: dtype[N, L],\n                       D: dtype[L, O]):\n    M1 = matmul_lib(A, B)\n    M2 = matmul_lib(C, D)\n    return matmul_lib(M1, M2)\n\n\ndef find_node(sdfg: dace.SDFG, label: str) -> dace.nodes.Node:\n    for n, _ in sdfg.all_nodes_recursive():\n        if n.label == label:\n            return n\n\n\n@pytest.mark.multigpu\ndef test_three_matmul_pure():\n    gpuHelper = 1\n    gpuMain = 0\n    dace.libraries.blas.default_implementation = 'pure'\n\n    sdfg: dace.SDFG = three_matmul.to_sdfg()\n    sdfg.name = 'gpu_p2p'\n    sdfg.apply_gpu_transformations()\n    ss = sdfg.start_state\n\n    sdfg.arrays['gpu_A'].location = {'gpu': gpuMain}\n    sdfg.arrays['gpu_A'].storage = StorageType.GPU_Global\n    sdfg.arrays['gpu_B'].location = {'gpu': gpuMain}\n    sdfg.arrays['gpu_B'].storage = StorageType.GPU_Global\n    sdfg.arrays['A'].location = {'gpu': gpuMain}\n    sdfg.arrays['A'].storage = StorageType.GPU_Global\n    sdfg.arrays['B'].location = {'gpu': gpuMain}\n    sdfg.arrays['B'].storage = StorageType.GPU_Global\n    sdfg.arrays['M1'].location = {'gpu': gpuMain}\n    sdfg.arrays['M1'].storage = StorageType.GPU_Global\n    sdfg.arrays['M2'].location = {'gpu': gpuMain}\n    sdfg.arrays['M2'].storage = StorageType.GPU_Global\n    sdfg.arrays['gpu___return'].location = {'gpu': gpuMain}\n    sdfg.arrays['gpu___return'].storage = StorageType.GPU_Global\n\n    sdfg.arrays['gpu_C'].location = {'gpu': gpuHelper}\n    sdfg.arrays['gpu_C'].storage = StorageType.GPU_Global\n    sdfg.arrays['gpu_D'].location = {'gpu': gpuHelper}\n    sdfg.arrays['gpu_D'].storage = StorageType.GPU_Global\n    sdfg.arrays['C'].location = {'gpu': gpuHelper}\n    sdfg.arrays['C'].storage = StorageType.GPU_Global\n    sdfg.arrays['D'].location = {'gpu': gpuHelper}\n    sdfg.arrays['D'].storage = StorageType.GPU_Global\n\n    m1 = find_node(sdfg, 'M1')\n    ss.predecessors(m1)[0].location = {'gpu': gpuMain}\n    ss.successors(m1)[0].location = {'gpu': gpuMain}\n    m2 = find_node(sdfg, 'M2')\n    ss.predecessors(m2)[0].location = {'gpu': gpuHelper}\n\n    sdfg.expand_library_nodes()\n    sdfg.apply_strict_transformations()\n    sdfg.apply_transformations_repeated(\n        [RedundantSecondArray, RedundantArray, RedundantArrayCopying])\n    cp.random.seed(0)\n    m = 100\n    k = 200\n    n = 300\n    l = 900\n    o = 777\n    # cupy.random.rand takes the dimensions as separate arguments, not a list\n    with cp.cuda.Device(0):\n        A = cp.random.rand(m, k, dtype=np_dtype)\n        B = cp.random.rand(k, n, dtype=np_dtype)\n    with cp.cuda.Device(1):\n        C = cp.random.rand(n, l, dtype=np_dtype)\n        D = cp.random.rand(l, o, dtype=np_dtype)\n    # A[:] = np.random.rand(m, k)[:]\n    # B[:] = np.random.rand(k, n)[:]\n    # C[:] = np.random.rand(n, l)[:]\n    # D[:] = np.random.rand(l, o)[:]\n\n    print('GPU start')\n    E = sdfg(A=A, B=B, 
C=C, D=D, M=m, K=k, N=n, L=l, O=o)\n print('GPU done')\n\n res = (A @ B) @ (C @ D)\n idx = list(zip(*np.where(~np.isclose(E, res, atol=0, rtol=1e-7))))\n numErrors = len(idx)\n if numErrors > 0:\n print(\"number of errors:\", numErrors)\n if numErrors < 100:\n for i in idx:\n print(i, E[i], res[i])\n assert np.allclose(E, res)\n\n # program_objects = sdfg.generate_code()\n # from dace.codegen import compiler\n # out_path = '.dacecache/local/cudastreams/'+sdfg.name\n # program_folder = compiler.generate_program_folder(sdfg, program_objects,\n # out_path)\n\n\n@pytest.mark.multigpu\ndef test_three_matmul():\n gpuHelper = 1\n gpuMain = 0\n dace.libraries.blas.default_implementation = 'cuBLAS'\n\n sdfg: dace.SDFG = three_matmul.to_sdfg()\n sdfg.name = 'gpu_p2p'\n sdfg.expand_library_nodes()\n\n state = sdfg.start_state\n output = state.sink_nodes()[0]\n mM1M2sdfg = state.predecessors(output)[0]\n m1 = state.predecessors(mM1M2sdfg)[0]\n m2 = state.predecessors(mM1M2sdfg)[1]\n mABsdfg = state.predecessors(m1)[0]\n mCDsdfg = state.predecessors(m2)[0]\n\n mABsdfg.location = {'gpu': gpuMain}\n mABsdfg.schedule = ScheduleType.GPU_Device\n\n mCDsdfg.location = {'gpu': gpuHelper}\n mCDsdfg.schedule = ScheduleType.GPU_Device\n\n mM1M2sdfg.location = {'gpu': gpuMain}\n mM1M2sdfg.schedule = ScheduleType.GPU_Device\n\n sdfg.arrays['M1'].location = {'gpu': gpuMain}\n sdfg.arrays['M1'].storage = StorageType.GPU_Global\n sdfg.arrays['M2'].location = {'gpu': gpuMain}\n sdfg.arrays['M2'].storage = StorageType.GPU_Global\n\n sdfg.apply_strict_transformations()\n sdfg.apply_transformations_repeated(\n [RedundantSecondArray, RedundantArray, RedundantArrayCopying])\n np.random.seed(0)\n m = 1024\n k = 2000\n n = 3000\n l = 900\n o = 7777\n A = np.ndarray(shape=[m, k], dtype=np_dtype)\n B = np.ndarray(shape=[k, n], dtype=np_dtype)\n C = np.ndarray(shape=[n, l], dtype=np_dtype)\n D = np.ndarray(shape=[l, o], dtype=np_dtype)\n A[:] = np.random.rand(m, k)[:]\n B[:] = np.random.rand(k, n)[:]\n C[:] = np.random.rand(n, l)[:]\n D[:] = np.random.rand(l, o)[:]\n\n E = sdfg(A=A, B=B, C=C, D=D, M=m, K=k, N=n, L=l, O=o)\n res = (A @ B) @ (C @ D)\n idx = list(zip(*np.where(~np.isclose(E, res, atol=0, rtol=1e-7))))\n numErrors = len(idx)\n if numErrors > 0:\n print(\"number of errors:\", numErrors)\n if numErrors < 100:\n for i in idx:\n print(i, E[i], res[i])\n assert np.allclose(E, res)\n\n # program_objects = sdfg.generate_code()\n # from dace.codegen import compiler\n # out_path = '.dacecache/local/cudastreams/'+sdfg.name\n # program_folder = compiler.generate_program_folder(sdfg, program_objects,\n # out_path)\n\n\nif __name__ == \"__main__\":\n test_three_matmul_pure()\n # test_three_matmul()\n","sub_path":"tests/gpu_multi/cudastreams/three_matmul_test.py","file_name":"three_matmul_test.py","file_ext":"py","file_size_in_byte":6782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"24438061","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 5 20:53:37 2020\r\n\r\n@author: 56977\r\n\"\"\"\r\n\r\nimport matplotlib.pylab as plt\r\nfrom scipy.integrate import odeint\r\nfrom leer_eof import leer_eof\r\nimport numpy as np\r\nfrom time import perf_counter\r\n\r\nfname = \"S1B_OPER_AUX_POEORB_OPOD_20200828T111242_V20200807T225942_20200809T005942.EOF\"\r\n\r\nsat_t, sat_x, sat_y, sat_z, sat_vx, sat_vy, sat_vz = leer_eof(fname)\r\n\r\ncorreccion = 0\r\n\r\ntmax = max(sat_t)\r\n\r\nKm = 1000.\r\nomega = -7.2921150e-5\r\nKm3 = (1000.)**3\r\nKm5 = (1000.)**5\r\nKm6 = 
(1000.)**6\r\nmu = 398600.440*Km3 #G*M_Earth\r\nJ2 = 1.75553e10*Km5\r\nJ3 = -2.61913e11*Km6\r\n#Nt = 9000\r\n#dt = 10.\r\nRadio = 6371.*Km\r\nH0 = 700.*Km\r\n\r\n\r\ndef eulerint(zpunto,z0,t,Nsub=1):\r\n    Nt = len(t)\r\n    Ndim = len(z0)\r\n    \r\n    z = np.zeros((Nt,Ndim))\r\n    z[0,:]=z0\r\n    \r\n    #z(i+1)=zp:i*dt+z_i\r\n    for i in range(1,Nt):\r\n        t_anterior = t[i-1]\r\n        dt = (t[i]-t[i-1])/Nsub\r\n        z_temp = z[i-1,:].copy() \r\n        for k in range(Nsub):\r\n            z_temp+= dt*zpunto(z_temp,t_anterior+k*dt)\r\n        z[i,:]=z_temp\r\n    \r\n    return z\r\n\r\ndef zpunto(z,t):\r\n    c = np.cos(omega*t)\r\n    s = np.sin(omega*t)\r\n    \r\n    R=np.array([[c,s,0],\r\n                [-s,c,0],\r\n                [0,0,1]])\r\n\r\n    Rp=np.array([[-s,c,0],\r\n                 [-c,-s,0],\r\n                 [0,0,0]])*omega\r\n\r\n    Rpp=np.array([[-c,-s,0],\r\n                  [s,-c,0],\r\n                  [0,0,0]])*omega**2\r\n    \r\n    x = z[0:3]\r\n    xp = z[3:6]\r\n    \r\n    r = np.sqrt(np.dot(x,x))\r\n    \r\n    xstill = R@x\r\n    rnorm = xstill/r\r\n    Fg = -mu/r**2*rnorm\r\n    \r\n    z2 = xstill[2]**2\r\n    rflat = xstill[0]**2 + xstill[1]**2\r\n    FJ2 = J2*xstill/r**7\r\n    FJ2[0] = FJ2[0]*(6*z2 -1.5*rflat)\r\n    FJ2[1] = FJ2[1]*(6*z2 -1.5*rflat)\r\n    FJ2[2] = FJ2[2]*(3*z2 -4.5*rflat)\r\n    \r\n    FJ3 = np.zeros(3)\r\n    FJ3[0] = J3*xstill[0]*xstill[2]/r**9 * (10*z2 - 7.5*rflat)\r\n    FJ3[1] = J3*xstill[1]*xstill[2]/r**9 * (10*z2 - 7.5*rflat)\r\n    FJ3[2] = J3/r**9 * (4*z2 *(z2 - 3*rflat) +1.5*rflat**2) \r\n    \r\n    \r\n    zp = np.zeros(6)\r\n    zp[0:3] = xp\r\n    \r\n    if correccion == 0:\r\n        zp[3:6] = R.T@(Fg-(2*Rp@xp+Rpp@x))\r\n    elif correccion == 1:\r\n        zp[3:6] = R.T@(Fg+FJ2-(2*Rp@xp+Rpp@x))\r\n    elif correccion == 2:\r\n        zp[3:6] = R.T@(Fg+FJ2+FJ3-(2*Rp@xp+Rpp@x))\r\n    \r\n    return zp\r\n\r\nt = sat_t\r\nx0 = Radio + H0\r\nvt = 6820.*3.6 #m/s\r\n\r\nz0 = np.array([\r\n        sat_x[0],\r\n        sat_y[0],\r\n        sat_z[0],\r\n        sat_vx[0],\r\n        sat_vy[0],\r\n        sat_vz[0],\r\n        ])\r\n\r\nsol = odeint(zpunto, z0, t)\r\n\r\nt1 = perf_counter()\r\nsol_euler = eulerint(zpunto,z0,t,Nsub=10000)\r\nt2 = perf_counter()\r\ntiempo_euler = t2-t1\r\n\r\nx = sol[:,0]\r\ny = sol[:,1]\r\nz = sol[:,2]\r\n\r\n\r\nx_euler = sol_euler[:,0]\r\ny_euler = sol_euler[:,1]\r\nz_euler = sol_euler[:,2]\r\n\r\nvx = sol[:,3]\r\nvy = sol[:,4]\r\nvz = sol[:,5]\r\n\r\nax = np.gradient(vx,t)\r\nay = np.gradient(vy,t)\r\naz = np.gradient(vz,t)\r\n\r\nsat_ax = np.gradient(sat_vx,sat_t)\r\nsat_ay = np.gradient(sat_vy,sat_t)\r\nsat_az = np.gradient(sat_vz,sat_t)\r\n\r\nr = np.sqrt((x)**2+(y)**2+(z)**2)\r\nv = np.sqrt((vx)**2+(vy)**2+(vz)**2)\r\na = np.sqrt((ax)**2+(ay)**2+(az)**2)\r\n\r\nsat_r = np.sqrt((sat_x)**2+(sat_y)**2+(sat_z)**2)\r\nsat_v = np.sqrt((sat_vx)**2+(sat_vy)**2+(sat_vz)**2)\r\nsat_a = np.sqrt((sat_ax)**2+(sat_ay)**2+(sat_az)**2)\r\n\r\ndelta = np.sqrt((x-sat_x)**2+(y-sat_y)**2+(z-sat_z)**2)\r\n\r\ndelta_euler = np.sqrt((x_euler-sat_x)**2+(y_euler-sat_y)**2+(z_euler-sat_z)**2)\r\n\r\nprint (delta[-1])\r\nprint (delta_euler[-1])\r\nprint (f\"the error is: {(delta_euler[-1]-delta[-1])/delta[-1]*100} %\")\r\nprint (f\"the eulerint run time is: {tiempo_euler/60} minutes or {tiempo_euler/3600} hours\")\r\n\r\nplt.figure()\r\nplt.plot(t/3600,delta/1000,label=\"Odeint\")\r\nplt.plot(t/3600,delta_euler/1000,label=\"Eulerint Nsub = 10000\")\r\nplt.suptitle(f\"Distance between real and predicted position, $\\\\delta_{{max}} = {delta[-1]/1000:.1f}$ (Km)\")\r\nplt.ylabel(\"Drift, $\\\\delta$ (KM)\")\r\nplt.xlabel(\"Time, $t$ (hours)\")\r\nplt.tight_layout(rect=[0,0.03,1,0.95])\r\nplt.legend()\r\n\r\n\r\nplt.show()","sub_path":"Entrega 
5/Pregunta_3.py","file_name":"Pregunta_3.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"469937334","text":"#!/usr/bin/env python3\n# MIT License\n#\n# Copyright (c) 2020 FABRIC Testbed\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n#\n# Author: Komal Thareja (kthare10@renci.org)\nfrom __future__ import annotations\n\nimport threading\nimport traceback\nfrom typing import TYPE_CHECKING\n\nfrom fabric.actor.core.common.constants import Constants\nfrom fabric.actor.core.core.actor_identity import ActorIdentity\nfrom fabric.actor.core.manage.messages.client_mng import ClientMng\nfrom fabric.message_bus.messages.proxy_avro import ProxyAvro\nfrom fabric.actor.core.util.id import ID\n\nif TYPE_CHECKING:\n from fabric.actor.core.apis.i_mgmt_actor import IMgmtActor\n from fabric.actor.core.apis.i_actor import IActor\n\n\nclass RemoteActorCacheException(Exception):\n \"\"\"\n Exception raised by RemoteActorCache\n \"\"\"\n\n\nclass RemoteActorCache:\n \"\"\"\n Maintains Remote Actors to which Actor is connected\n \"\"\"\n actor_name = \"NAME\"\n actor_guid = \"GUID\"\n actor_location = \"LOCATION\"\n actor_protocol = \"PROTOCOL\"\n actor_type = \"TYPE\"\n\n def __init__(self):\n self.cache = {}\n from fabric.actor.core.container.globals import GlobalsSingleton\n self.logger = GlobalsSingleton.get().get_logger()\n self.lock = threading.Lock()\n self.local_actor_guids = set()\n\n def known_guids(self) -> set:\n \"\"\"\n Returns list of know GUIDs\n \"\"\"\n result = None\n try:\n self.lock.acquire()\n result = set()\n for k in self.cache.keys():\n result.add(k)\n finally:\n self.lock.release()\n return result\n\n def check_to_remove_entry(self, *, guid: ID):\n \"\"\"\n Check if actor entry can be removed\n @param guid actor guid\n \"\"\"\n if guid is None:\n return\n\n try:\n self.lock.acquire()\n if guid in self.cache:\n value = self.cache[guid]\n if self.actor_location not in value:\n self.cache.pop(guid)\n finally:\n self.lock.release()\n\n def add_cache_entry(self, *, guid: ID, entry: dict):\n \"\"\"\n Add actor entry to Cache\n @param guid actor guid\n @param entry actor entry\n \"\"\"\n if guid is None or entry is None:\n return\n\n try:\n self.lock.acquire()\n self.non_mt_cache_merge(guid=guid, entry=entry)\n self.local_actor_guids.add(guid)\n finally:\n self.lock.release()\n\n def add_partial_cache_entry(self, *, guid: ID, entry: dict):\n \"\"\"\n Add partial actor entry to 
Cache\n @param guid actor guid\n @param entry actor entry\n \"\"\"\n if guid is None or entry is None:\n return\n try:\n self.lock.acquire()\n self.non_mt_cache_merge(guid=guid, entry=entry)\n finally:\n self.lock.release()\n\n def non_mt_cache_merge(self, *, guid: ID, entry: dict):\n \"\"\"\n Merge to an existing entry if present; remove otherwise\n @param guid actor guid\n @param entry actor entry\n \"\"\"\n if guid not in self.cache:\n self.logger.debug(\"Inserting new entry for {}\".format(guid))\n self.cache[guid] = entry\n return\n current = self.cache[guid]\n for k, v in entry.items():\n current[k] = v\n\n self.cache[guid] = current\n\n def get_cache_entry_copy(self, *, guid: ID) -> dict:\n \"\"\"\n Get a copy of cacher entry\n @param guid actor guid\n \"\"\"\n try:\n self.lock.acquire()\n if guid in self.cache:\n ret_val = self.cache[guid]\n return ret_val\n finally:\n self.lock.release()\n return None\n\n def check_peer(self, *, from_mgmt_actor: IMgmtActor, from_guid: ID, to_mgmt_actor: IMgmtActor, to_guid: ID):\n \"\"\"\n Check if a peer is already connected\n @param from_mgmt_actor from actor\n @param from_guid guid of from actor\n @param to_mgmt_actor to actor\n @param to_guid guid of to actor\n \"\"\"\n self.logger.debug(\"from_mgmt_actor={} from_guid={} to_mgmt_actor={} to_guid={}\".format(type(from_mgmt_actor),\n from_guid,\n type(to_mgmt_actor),\n to_guid))\n try:\n if to_mgmt_actor is not None:\n clients = to_mgmt_actor.get_clients(guid=from_guid)\n if clients is not None:\n self.logger.debug(\"Edge between {} and {} exists (client)\".format(from_guid, to_guid))\n return True\n\n elif from_mgmt_actor is not None:\n brokers = from_mgmt_actor.get_brokers(broker=to_guid)\n if brokers is not None:\n self.logger.debug(\"Edge between {} and {} exists (broker)\".format(from_guid, to_guid))\n return True\n except Exception as e:\n raise RemoteActorCacheException(\"Unable to cast actor {} or {} e={}\".format(from_guid, to_guid, e))\n\n self.logger.debug(\"Edge between {} and {} does not exist\".format(from_guid, to_guid))\n return False\n\n def establish_peer_private(self, *, from_mgmt_actor: IMgmtActor, from_guid: ID, to_mgmt_actor:IMgmtActor,\n to_guid: ID) -> ClientMng:\n \"\"\"\n Establish connection i.e. 
create either proxies or clients between peer\n @param from_mgmt_actor from actor\n @param from_guid guid of from actor\n @param to_mgmt_actor to actor\n @param to_guid guid of to actor\n \"\"\"\n self.logger.debug(\"establish_peer_private IN\")\n client = None\n from_map = self.get_cache_entry_copy(guid=from_guid)\n to_map = self.get_cache_entry_copy(guid=to_guid)\n\n if from_map is None:\n raise RemoteActorCacheException(\"Actor {} does not have a registry cache entry\".format(from_guid))\n\n if to_map is None:\n raise RemoteActorCacheException(\"Actor {} does not have a registry cache entry\".format(to_guid))\n\n if from_mgmt_actor is not None:\n self.logger.debug(\"From actor {} is local\".format(from_mgmt_actor.get_name()))\n\n protocol = Constants.protocol_local\n kafka_topic = None\n\n if self.actor_location in to_map:\n if self.actor_protocol not in to_map:\n raise RemoteActorCacheException(\"Actor {} does not specify communications protocol (local/kafka)\".format(\n to_map[self.actor_name]))\n\n protocol = to_map.get(self.actor_protocol, None)\n kafka_topic = to_map[self.actor_location]\n self.logger.debug(\"Added To actor location (non-local) {}\".format(to_map[self.actor_location]))\n\n identity = ActorIdentity(name=to_map[self.actor_name], guid=to_guid)\n\n if kafka_topic is not None:\n self.logger.debug(\"Kafka Topic is available, registering broker proxy\")\n proxy = ProxyAvro()\n proxy.set_protocol(protocol)\n proxy.set_guid(str(identity.get_guid()))\n proxy.set_name(identity.get_name())\n proxy.set_type(to_map[self.actor_type])\n proxy.set_kafka_topic(kafka_topic)\n\n try:\n if not from_mgmt_actor.add_broker(broker=proxy):\n raise RemoteActorCacheException(\"Could not register broker {}\".format(from_mgmt_actor.get_last_error()))\n except Exception as e:\n traceback.print_exc()\n else:\n self.logger.debug(\"Not adding broker to actor at this time because the remote actor actor \"\n \"kafka topic is not available\")\n\n if to_mgmt_actor is not None:\n self.logger.debug(\"Creating a client for local to actor\")\n client = ClientMng()\n client.set_name(name=from_mgmt_actor.get_name())\n client.set_guid(guid=str(from_mgmt_actor.get_guid()))\n try:\n to_mgmt_actor.register_client(client=client, kafka_topic=kafka_topic)\n except Exception as e:\n raise RemoteActorCacheException(\"Could not register actor: {} as a client of actor: {} e= {}\".format(\n client.get_name(), to_mgmt_actor.get_name(), e))\n else:\n # fromActor is remote: toActor must be local\n # no-need to create any proxies\n # we only need to register clients\n if to_mgmt_actor is None:\n raise RemoteActorCacheException(\"Both peer endpoints are non local actors: {} {}\".format(\n from_map[self.actor_name], to_map[self.actor_name]))\n\n if self.actor_guid not in from_map:\n raise RemoteActorCacheException(\"Missing guid for remote actor: {}\".format(from_map[self.actor_name]))\n\n self.logger.debug(\"From actor was remote, to actor {} is local\".format(to_mgmt_actor.get_name()))\n if self.actor_location in from_map:\n kafka_topic = from_map[self.actor_location]\n self.logger.debug(\"From actor has kafka topic\")\n self.logger.debug(\"Creating client for from actor {}\".format(from_map[self.actor_name]))\n client = ClientMng()\n client.set_name(name=from_map[self.actor_name])\n client.set_guid(guid=str(from_map[self.actor_guid]))\n try:\n to_mgmt_actor.register_client(client=client, kafka_topic=kafka_topic)\n except Exception as e:\n raise RemoteActorCacheException(\n \"Could not register actor: {} as a client of 
actor: {} e= {}\".format(\n client.get_name(), to_mgmt_actor.get_name(), e))\n else:\n self.logger.debug(\"Not adding client to actor at this time - remote actor topic not available\")\n self.logger.debug(\"establish_peer_private OUT {}\".format(client))\n return client\n\n def establish_peer(self, *, from_guid: ID, from_mgmt_actor: IMgmtActor, to_guid: ID,\n to_mgmt_actor: IMgmtActor) -> ClientMng:\n \"\"\"\n Check if peer exists in cache and if not Establish connection i.e. create either proxies or clients between peer\n @param from_mgmt_actor from actor\n @param from_guid guid of from actor\n @param to_mgmt_actor to actor\n @param to_guid guid of to actor\n \"\"\"\n self.logger.debug(\"establish_peer IN\")\n client = None\n if from_guid is None or to_guid is None:\n self.logger.error(\"Cannot establish peer when either guid is not known\")\n raise RemoteActorCacheException(\"Cannot establish peer when either guid is not known\")\n try:\n if not self.check_peer(from_mgmt_actor=from_mgmt_actor, from_guid=from_guid,\n to_mgmt_actor=to_mgmt_actor, to_guid=to_guid):\n\n client = self.establish_peer_private(from_mgmt_actor=from_mgmt_actor, from_guid=from_guid,\n to_mgmt_actor=to_mgmt_actor, to_guid=to_guid)\n\n self.check_to_remove_entry(guid=from_guid)\n self.check_to_remove_entry(guid=to_guid)\n\n self.logger.debug(\"Peer established from {} to {}\".format(from_guid, to_guid))\n\n except Exception as e:\n self.logger.error(traceback.format_exc())\n self.logger.error(\"Peer could not be established from {} to {} e:={}\".format(from_guid, to_guid, e))\n self.logger.debug(\"establish_peer OUT {}\".format(client))\n return client\n\n def register_with_registry(self, *, actor: IActor):\n \"\"\"\n Register an actor with Registry\n @param actor actor\n \"\"\"\n try:\n act_name = actor.get_name()\n act_type = actor.get_type()\n act_guid = actor.get_guid()\n\n from fabric.actor.core.container.globals import GlobalsSingleton\n kafka_topic = GlobalsSingleton.get().get_config().get_actor().get_kafka_topic()\n\n entry = {self.actor_name: act_name,\n self.actor_guid: act_guid,\n self.actor_type: act_type.name,\n self.actor_protocol: Constants.protocol_kafka,\n self.actor_location: kafka_topic}\n\n self.add_cache_entry(guid=act_guid, entry=entry)\n # TODO start liveness thread\n except Exception as e:\n self.logger.debug(\"Could not register actor {} with lcoal registry e: {}\".format(actor.get_name(), e))\n\n\nclass RemoteActorCacheSingleton:\n \"\"\"\n Remote Actor Cache Singleton\n \"\"\"\n __instance = None\n\n def __init__(self):\n if self.__instance is not None:\n raise RemoteActorCacheException(\"Singleton can't be created twice !\")\n\n def get(self):\n \"\"\"\n Actually create an instance\n \"\"\"\n if self.__instance is None:\n self.__instance = RemoteActorCache()\n return self.__instance\n\n get = classmethod(get)","sub_path":"fabric/actor/core/container/remote_actor_cache.py","file_name":"remote_actor_cache.py","file_ext":"py","file_size_in_byte":14552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"461377528","text":"from .utils import BaseClient, TYPE_CLIENT\nfrom . 
import utils\n\nclass Client(BaseClient):\n\n def __init__(self):\n BaseClient.__init__(self, TYPE_CLIENT)\n\n def submitJob(self, job):\n agent = self.agent\n yield from agent.send([utils.SUBMIT_JOB, utils.encodeJob(job)])\n payload = yield from agent.recive()\n self.remove_agent(agent)\n if payload == utils.SUCCESS:\n return True\n else:\n return False\n\n def removeJob(self, job):\n agent = self.agent\n yield from agent.send([utils.REMOVE_JOB, utils.encodeJob(job)])\n payload = yield from agent.recive()\n self.remove_agent(agent)\n if payload == utils.SUCCESS:\n return True\n else:\n return False\n\n def status(self):\n agent = self.agent\n yield from agent.send([utils.STATUS])\n payload = yield from agent.recive()\n self.remove_agent(agent)\n payload = str(payload, 'utf-8').strip()\n stats = payload.split('\\n')\n retval = {}\n for stat in stats:\n stat = stat.strip()\n if not stat:\n continue\n stat = stat.split(\",\")\n retval[stat[0]] = {\n 'func_name': stat[0],\n 'worker_count': int(stat[1]),\n 'job_count': int(stat[2]),\n 'processing': int(stat[3]),\n 'sched_at': int(stat[4])\n }\n\n return retval\n\n def dropFunc(self, func):\n agent = self.agent\n yield from agent.send([utils.DROP_FUNC, func])\n payload = yield from agent.recive()\n self.remove_agent(agent)\n if payload == utils.SUCCESS:\n return True\n else:\n return False\n","sub_path":"aio_periodic/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"317710209","text":"from five import grok\nfrom opengever.document.archival_file import ArchivalFileConverter\nfrom opengever.document.document import IDocumentSchema\nfrom opengever.document.interfaces import IObjectBeforeCheckInEvent\nfrom opengever.document.interfaces import IObjectCheckedOutEvent\nfrom opengever.dossier.docprops import DocPropertyWriter\nfrom zope.lifecycleevent import IObjectMovedEvent\nfrom zope.lifecycleevent import IObjectRemovedEvent\nfrom zope.lifecycleevent.interfaces import IObjectModifiedEvent\n\n\nDISABLE_DOCPROPERTY_UPDATE_FLAG = 'disable_docproperty_update'\n\n\n@grok.subscribe(IDocumentSchema, IObjectCheckedOutEvent)\ndef checked_out(context, event):\n _update_docproperties(context)\n\n\n@grok.subscribe(IDocumentSchema, IObjectBeforeCheckInEvent)\ndef update_docproperties(context, event):\n _update_docproperties(context)\n\n\n@grok.subscribe(IDocumentSchema, IObjectMovedEvent)\ndef update_moved_doc_properties(context, event):\n if IObjectRemovedEvent.providedBy(event):\n return\n\n if context.REQUEST.get(DISABLE_DOCPROPERTY_UPDATE_FLAG):\n return\n\n _update_docproperties(context)\n\n\ndef _update_docproperties(document):\n DocPropertyWriter(document).update()\n\n\n@grok.subscribe(IDocumentSchema, IObjectModifiedEvent)\ndef set_archival_file_state(context, event):\n # Because every filewidget is always marked as changed, in the event\n # descriptions, even when no file has changed, we have to check the request\n request = context.REQUEST\n\n if request.get('ACTUAL_URL').endswith('edit_archival_file'):\n field_name = 'archival_file'\n else:\n field_name = 'IDocumentMetadata.archival_file'\n\n fileupload = request.get('form.widgets.{}'.format(field_name))\n action = request.get('form.widgets.{}.action'.format(field_name), '')\n\n if bool(fileupload):\n ArchivalFileConverter(context).handle_manual_file_upload()\n\n file_removed = action == u'remove'\n file_removed_in_archival_form = isinstance(action, list) and u'remove' in 
action\n\n    if file_removed or file_removed_in_archival_form:\n        ArchivalFileConverter(context).remove_state()\n","sub_path":"opengever/document/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"219387356","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('', views.fileView, name='post_list'),\n    path('post/<int:pk>/', views.post_detail, name='post_detail'),\n    path('post/new/', views.post_new, name='post_new'),\n    path('post', views.post1, name='post1'),\n    path('post', views.post2, name='post2'),\n    path('post', views.post3, name='post3'),\n    ]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"66261284","text":"import camb\nimport numpy as np\nimport os\nimport psutil\nprocess = psutil.Process(os.getpid())\n\nparams=np.asarray([65,0.02,0.1,0.05,2e-9,0.96])\nls = range(2,2000)\n\nH0=params[0]\nombh2=params[1]\nomch2=params[2]\ntau=params[3]\nAs=params[4]\nns=params[5]\n\nprevious = process.memory_info().rss\nprint(previous)\nwhile True:\n    pars=camb.CAMBparams()\n    pars.set_cosmology(H0=H0,ombh2=ombh2,omch2=omch2,mnu=0.06,omk=0,tau=tau)\n    pars.InitPower.set_params(As=As,ns=ns,r=0)\n    pars.set_for_lmax(int(max(ls)),lens_potential_accuracy=0)\n\n    results = camb.get_results(pars)\n    del results\n    new = process.memory_info().rss \n    print(\"Current Memory Usage: %d.  \" % (new), end=\"\")\n    print(\"Loop Delta: %d\" % (new - previous))\n    previous = new\n","sub_path":"ps3/memorytest.py","file_name":"memorytest.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"438533701","text":"# *****************************************************************************\n# Colfax Tax Trainer\n# Copyright (c) 2018 Adam Milton-Barker - AdamMiltonBarker.com\n# Based on Google's Tensorflow Imagenet Inception V3\n# *****************************************************************************\n\nimport json\nimport InceptionFlow\n\nprint(\"Imported Required Modules\")\n\nclass TassColfaxTrainer():\n    \n    def __init__(self):\n        \n        self.InceptionFlow = InceptionFlow.InceptionFlow()\n        \n        print(\"TassColfaxTrainer Initiated\")\n        \n    def InitiateTraining(self):\n        \n        print(\"TassColfaxTrainer Training Initiated\")\n        self.InceptionFlow.trainModel()\n        \nTassColfaxTrainer = TassColfaxTrainer()\nTassColfaxTrainer.InitiateTraining()    \n\n","sub_path":"Intel-AI-DevCloud/Tass-Trainer/runit.py","file_name":"runit.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"377774739","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nFile Description: \nAuthor: nghuyong\nMail: nghuyong@163.com\nCreated Time: 2020/4/14\n\"\"\"\nimport datetime\nimport hashlib\nimport json\nimport logging\nimport re\nimport time\nimport traceback\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pymongo.collection import Collection\nfrom scrapy import FormRequest\nfrom scrapy.http import Request\nfrom scrapy_redis.spiders import RedisSpider\n\nfrom crawler_conf import MONGO_URI_SH_INET, MONGO_DATABASE\nfrom crawler_heimaotousu_redis.items import HeiMaoTouSuItem\nfrom mongo_helper import get_mongo_connection\nfrom pub import generate_document_id\n\n\nclass 
HeiMaoTouSuSpider(RedisSpider):\n name = \"heimaotousu_spider\"\n base_url = \"https://weibo.cn\"\n redis_key = \"heimaotousu_spider:start_urls\"\n\n db = get_mongo_connection(MONGO_URI_SH_INET, MONGO_DATABASE)\n\n start_time = datetime.datetime.utcnow()\n spider_config_dict = {\n 'heimaotousu': ('黑猫投诉', 'crawler.crawler_sina_tousu'),\n }\n need_proxy_list = ['tousu.sina.com.cn']\n\n complaint_status = {\n 1: '通过审核',\n 3: '待分配',\n 4: '处理中',\n 6: '已回复',\n 7: '已完成',\n 8: '已关闭',\n }\n\n last_update_time = ''\n\n # logging.getLogger('scrapy').propagate = False\n\n headers = {\n 'authority': 'n.sinaimg.cn',\n 'pragma': 'no-cache',\n 'cache-control': 'no-cache',\n 'sec-ch-ua': '\"Chromium\";v=\"94\", \"Google Chrome\";v=\"94\", \";Not A Brand\";v=\"99\"',\n 'accept': '*/*',\n 'x-requested-with': 'XMLHttpRequest',\n 'sec-ch-ua-mobile': '?0',\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',\n 'sec-ch-ua-platform': '\"macOS\"',\n 'sec-fetch-site': 'cross-site',\n 'sec-fetch-mode': 'no-cors',\n 'sec-fetch-dest': 'script',\n 'referer': 'https://tousu.sina.com.cn/',\n 'accept-language': 'zh-CN,zh;q=0.9',\n }\n\n def make_request_from_data(self, data):\n data = json.loads(data)\n uid = data['uid']\n company_name = data['title']\n\n try:\n url = 'https://tousu.sina.com.cn/api/company/received_complaints'\n last_update_time = self.get_last_time(product_name=company_name)\n timestamp = str(int(time.time() * 1000))\n params = self.init_params(couid=uid, c_type=str(1), page=str(1),\n timestamp=timestamp)\n item = HeiMaoTouSuItem()\n item['product_name'] = company_name\n return FormRequest(url=url, method='GET', formdata=params, headers=self.headers, dont_filter=True,\n priority=10,\n callback=self.parse, meta={\n 'item': item,\n 'uid': uid,\n 'last_update_time': last_update_time,\n 'company_name': company_name,\n })\n except:\n self.logger.error(f'店铺:{company_name}, uid:{uid}, 生成起始Request失败, 错误原因:{traceback.format_exc()}')\n\n def parse(self, response):\n try:\n uid = response.meta['uid']\n company_name = response.meta['company_name']\n last_update_time = response.meta['last_update_time']\n page_nums = self.get_company_page_num(response.text)\n\n url = 'https://tousu.sina.com.cn/api/company/received_complaints'\n for page_num in range(page_nums):\n timestamp = str(int(time.time() * 1000))\n params = self.init_params(couid=uid, c_type=str(1), page=str(page_num + 1),\n timestamp=timestamp)\n item = HeiMaoTouSuItem()\n item['product_name'] = company_name\n yield FormRequest(url=url, method='GET', formdata=params, headers=self.headers, dont_filter=True,\n priority=10,\n callback=self.parse_list, meta={\n 'item': item,\n 'uid': uid,\n 'last_update_time': last_update_time,\n })\n except:\n self.logger.error(traceback.format_exc())\n\n def parse_list(self, response):\n try:\n item = response.meta['item']\n last_update_time = response.meta['last_update_time']\n json_data = json.loads(response.text)\n complaints = json_data['result']['data']['complaints']\n\n if complaints == None:\n complaints = []\n\n pager = json_data['result']['data']['pager']\n current_page = pager['current']\n page_amount = pager['page_amount']\n\n self.logger.info('=' * 80)\n self.logger.info(pager)\n self.logger.info(\n f'公司:{item[\"product_name\"]}, 最新更新日期:{last_update_time} 正在解析第{current_page}/{page_amount}'\n f'页投诉翻页,当前页{len(complaints)}条投诉')\n for complaint in complaints:\n # 解析投诉列表信息\n complaint_item = item.deepcopy()\n complaint_item['url'] = 
'https:' + complaint['main']['url']\n                complaint_item['complaint_status'] = self.complaint_status[complaint['main']['status']]\n                complaint_item['title'] = self.extract_text(complaint['main']['title'])\n                complaint_item['comsumer_name'] = complaint['author']['title']\n                # complaint_item['wb_profile'] = 'https:' + complaint['author']['wb_profile']\n                complaint_item['time'] = self.time_tanser(complaint['main']['timestamp'])\n\n                if last_update_time > complaint_item['time']:\n                    self.logger.warn(\n                        f'公司:{item[\"product_name\"]}, 标题:{complaint_item[\"title\"]}, 最新更新日期:{last_update_time}, '\n                        f'当前日期:{complaint_item[\"time\"]}, 无需更新')\n                    break\n\n                self.logger.info(\n                    f'公司:{item[\"product_name\"]}, 更新标题:{complaint_item[\"title\"]}, 最新更新日期:{last_update_time}, '\n                    f'当前日期:{complaint_item[\"time\"]}')\n\n                abstract = {\n                    'complaint_id': complaint['main']['sn'],\n                    'complaint_target': self.extract_text(complaint['main']['cotitle']),\n                    'appeal': complaint['main']['appeal'],\n                    'amount_involved': float(complaint['main']['cost'])\n                }\n                complaint_item['abstract'] = abstract\n                evaluate_u = complaint['main']['evaluate_u']\n\n                # 生成解析详情页Request\n                yield Request(url=complaint_item['url'], callback=self.parse_detail, dont_filter=True,\n                              headers=self.headers,\n                              priority=10, meta={\n                        'item': complaint_item,\n                        'evaluate_u': evaluate_u\n                    })\n\n            # 检查该公司当前列表页最后一条投诉,与最新更新日期比较(经过观察,投诉列表是按照日期递减排序)\n            # last_complaint = complaints[-1]\n            # if self.time_tanser(last_complaint['main'][\n            #     'timestamp']) >= last_update_time and page_amount >= next_page > current_page:\n            #     next_item = item.deepcopy()\n            #     url = 'https://tousu.sina.com.cn/api/company/received_complaints'\n            #     timestamp = str(int(time.time() * 1000))\n            #     params = self.init_params(couid=uid, c_type=str(1), page=str(next_page),\n            #                               timestamp=timestamp)\n            #     yield FormRequest(url=url, method='GET', formdata=params, headers=self.headers, cookies=self.cookies,\n            #                       priority=0,\n            #                       callback=self.parse_list, meta={\n            #             'item': next_item,\n            #             'uid': uid,\n            #             'last_update_time': last_update_time,\n            #         })\n            # else:\n            #     self.logger.warn(\n            #         f'公司:{item[\"product_name\"]}, 最新更新日期:{last_update_time},'\n            #         f' 当前日期:{self.time_tanser(last_complaint[\"main\"][\"timestamp\"])}, '\n            #         f'当前页码{current_page}, 停止投诉翻页解析')\n        except:\n            self.logger.error(traceback.format_exc())\n\n    def parse_detail(self, response):\n        try:\n            item = response.meta['item']\n            status = response.xpath('//ul[@class=\"ts-q-list\"]/li[last()]/b/text()').get()\n            abstract = item['abstract']\n            abstract['status'] = status\n\n            steplist = response.xpath('//div[@class=\"ts-d-item\"]').getall()\n            steps = list()\n            for step in steplist:\n                soup = BeautifulSoup(step, 'lxml')\n                name = soup.find('span', {'class': 'u-name'}).text\n                status = soup.find('span', {'class': 'u-status'}).text\n                date = soup.find('span', {'class': 'u-date'}).text\n                if '评价' in status:\n                    evaluate_u = response.meta['evaluate_u']\n                    content = f'服务态度: {evaluate_u[\"attitude\"]}星,处理速度: {evaluate_u[\"process\"]}星,满意度: {evaluate_u[\"satisfaction\"]}星 \\n {evaluate_u[\"evalContent\"]}'\n                else:\n                    content = soup.find('div', {'class': 'ts-d-cont'}).text\n                data = {\n                    'name': name,\n                    'status': status,\n                    'date': date,\n                    'detail': content,\n                }\n                steps.append(data)\n            item['step_list'] = steps\n\n            item['api_name'] = self.name\n\n            relategroupts_url = response.xpath('//a[@data-sudaclick=\"relategroupts_view\"]/@href').get()\n            if relategroupts_url:\n                item['group_complaint_id'] = re.search('.*?/view/(.*)', relategroupts_url).group(1)\n\n            item['insert_time'] = 
datetime.datetime.utcnow()\n item['document_id'] = generate_document_id(dict(item), ['url', 'complaint_status'])\n yield item\n except:\n self.logger.error(traceback.format_exc())\n\n def get_company_page(self, url):\n response = requests.get(url, headers=self.headers)\n json_data = response.json()\n return json_data\n\n def get_company_page_num(self, content):\n try:\n json_data = json.loads(content)\n page_nums = json_data['result']['data']['pager']['page_amount']\n if page_nums:\n if page_nums < 100:\n return page_nums\n else:\n return 100\n else:\n return 1\n except:\n return 1\n # return page_nums if page_nums and page_nums < 100 else 100\n\n def get_complaint_page(self, url, params):\n response = requests.get(url, params=params, headers=self.headers)\n json_data = response.json()\n page_nums = json_data['result']['data']['pager']['page_amount']\n return page_nums\n # return page_nums if page_nums and page_nums < 100 else 100\n\n def get_complaint_page_num(self, url, params):\n page_nums = self.get_complaint_page(url, params)\n return page_nums\n\n def get_signature(self, params_list):\n params_list.sort()\n s = hashlib.sha256()\n s.update(''.join(params_list).encode('utf-8')) # Hash the data.\n signature = s.hexdigest()\n return signature\n\n def init_params(self, couid, c_type='1', page='1', page_size='10', timestamp=''):\n random_str = 'QNT4vu8q79XzrcdM'\n const_str = '$d6eb7ff91ee257475%'\n\n params_list = [timestamp, random_str, couid, const_str, c_type, page_size, page]\n signature = self.get_signature(params_list)\n\n return {\n 'ts': timestamp,\n 'rs': random_str,\n 'couid': couid,\n 'type': str(c_type),\n \"page_size\": str(page_size),\n 'page': str(page),\n 'signature': signature\n }\n\n def time_tanser(self, timestamp):\n localtime = time.localtime(int(timestamp))\n time_ = time.strftime('%Y-%m-%d %H:%M:%S', localtime)\n return time_\n\n def get_last_time(self, product_name):\n\n try:\n collection: Collection = self.db['crawler.crawler_sina_tousu']\n item = next(collection.find({'product_name': product_name}).sort([('time', -1)]).limit(1))\n return item['time']\n # return '2000-01-1 00:00:00'\n except StopIteration:\n return '2000-01-1 00:00:00'\n\n def extract_text(self, html):\n soup = BeautifulSoup(html, 'html.parser')\n return soup.get_text()\n\n def insert_company(self, companies):\n try:\n collection: Collection = self.db['crawler.crawler_sina_tousu.company']\n collection.insert_many(companies)\n self.logger.info('插入成功')\n except:\n pass\n","sub_path":"crawler_heimaotousu_redis/spiders/heimaotousu.py","file_name":"heimaotousu.py","file_ext":"py","file_size_in_byte":13371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"508771200","text":"import numpy as np\nfrom joblib import dump, load\n\n\ndef find_true_author_index(true_author, mentions):\n \"\"\"\n Finds the index of the mention that is the author of the quote. 
As there might be little differences in between what\n spaCy sees as Named Entities and what people see as Named Entities, we're just looking for an Named Entity that\n overlaps with the tag.\n\n :param true_author: list(int)\n The index of the tokens of the true_author.\n :param mentions: list(dict)\n The mentions of PER named entities in the article, saved as dicts with keys 'start' (the index of the first\n token in the named entity), 'end' (the index of the last token in the named entity), 'name' (the text of the\n named entity) and 'full_name' (the longest form of that person's name found in the article).\n :return: int\n The index of the mention in the list of mentions, or -1 if the true author isn't a named entity.\n \"\"\"\n if len(true_author) == 0:\n return -1\n reported_start = true_author[0]\n reported_end = true_author[-1]\n for index, mention in enumerate(mentions):\n if mention['start'] <= reported_end and mention['end'] >= reported_start:\n return index\n return -1\n\n\ndef author_full_name(article, author_index):\n \"\"\"\n Determines the full name of someone cited in an article, given the index of the named entity in the article.\n\n :param article: models.Article\n The article containing the author\n :param author_index: int\n The index of the author in the list of mentions.\n :return: string\n The name of the author of the quote.\n \"\"\"\n if author_index < 0 or author_index >= len(article.people['mentions']):\n return None\n return article.people['mentions'][author_index]['full_name']\n\n\ndef author_full_name_no_db(article_mentions, author_index):\n \"\"\"\n Determines the full name of someone cited in an article, given the index of the named entity in the article.\n\n :param article_mentions: ...\n Article.people['mentions']\n :param author_index: int\n The index of the author in the list of mentions.\n :return: string\n The name of the author of the quote.\n \"\"\"\n if author_index < 0 or author_index >= len(article_mentions):\n return None\n return article_mentions[author_index]['full_name']\n\n\ndef extract_speaker_names(article, author_indices):\n \"\"\"\n Given an article and the indices of named entities in the article that are quoted in it, extracts their full names.\n\n :param article: models.Article\n The article containing the author\n :param author_indices: list(int)\n The index of the author in the list of mentions.\n :return: Set(string)\n The full names of people quoted in the article.\n \"\"\"\n speakers = set()\n for i in author_indices:\n full_name = author_full_name(article, i)\n if full_name is not None and full_name not in speakers:\n speakers.add(full_name)\n return speakers\n\n\ndef evaluate_speaker_extraction(true_names, predicted_names):\n \"\"\"\n Computes the precision and recall for an article, given the true people cited and the predictions from a model.\n\n :param true_names: set(string)\n The names as given by user labels\n :param predicted_names: set(string)\n The names as predicted by a model\n :return: float, float\n The precision and recall.\n \"\"\"\n true_positives = 0\n false_positives = 0\n false_negatives = 0\n\n for name in true_names:\n if name in predicted_names:\n true_positives += 1\n else:\n false_negatives += 1\n\n for name in predicted_names:\n if name not in true_names:\n false_positives += 1\n\n if true_positives + false_positives == 0:\n precision = 1\n else:\n precision = true_positives/(true_positives + false_positives)\n\n if true_positives + false_negatives == 0:\n recall = 1\n else:\n recall = 
true_positives/(true_positives + false_negatives)\n\n return precision, recall\n\n\ndef balance_classes(X, y):\n \"\"\"\n Given the input matrices for an unbalanced classification task (where one class is present much more often than the\n other in the data), balances the classes so they both have the same number of samples by sub-sampling from the class\n that's more present.\n\n :param X: np.ndarray\n The input vectors.\n :param y: np.ndarray\n The labels.\n :return: np.ndarray, np.ndarray\n X, y: the input vectors and labels.\n \"\"\"\n # Indices where the label is 0 (no reported speech is present)\n is_not_quote = (y == 0).nonzero()[0]\n # Indices where the label is 1 (reported speech is present)\n is_quote = (y == 1).nonzero()[0]\n # Takes random indices from the class most present, and all indices from the one least present\n if len(is_quote) < len(is_not_quote):\n subsample = np.random.choice(is_not_quote, size=len(is_quote), replace=False)\n indices = np.sort(np.concatenate((subsample, is_quote)))\n else:\n subsample = np.random.choice(is_quote, size=len(is_not_quote), replace=False)\n indices = np.sort(np.concatenate((subsample, is_not_quote)))\n # Only keeps some of the values\n sampled_y = np.take(y, indices)\n sampled_X = np.take(X, indices, axis=0)\n return sampled_X, sampled_y\n\n\ndef save_model(classifier, filepath):\n \"\"\"\n\n :param classifier:\n :param filepath:\n :return:\n \"\"\"\n dump(classifier, filepath)\n\n\ndef load_model(filepath):\n \"\"\"\n\n :param filepath:\n :return:\n \"\"\"\n return load(filepath)\n","sub_path":"activelearning/backend/ml/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"302179813","text":"__author__ = 'jcorrea'\n\nfrom omero.gateway import BlitzGateway\nimport omero.scripts as scripts\nimport omero.util.script_utils as script_utils\nimport os\n\nfrom omero.rtypes import rlong, rstring\n\nimport tempfile\n\nimport math\n\nOMERO_HOME=\"/project/projectdirs/ngbi/omero5/OMERO.server\" #TODO: pass env var\n# OMERO_HOME = os.environ['OMERO_HOME']\nqsub_path= \"/usr/syscom/opt/torque/4.2.6/bin/qsub\" #TODO: specify qsub per system\n\nGSCRATCH = \"/global/scratch2/sd/jcorrea\" #TODO: specify scratch per user\n# GSCRATCH = os.environ['SCRATCH']\n# GSCRATCH = \"/global/scratch2/sd/%s\" % (user)\n# cache_dir = os.path.join(GSCRATCH, \"ngbi/tmp\") #TODO: review cache dir\ncache_dir = GSCRATCH\n\nOMERO_PATH=os.path.join(OMERO_HOME,\"bin/omero\")\nSCRIPT_PATH=os.path.join(OMERO_HOME,\"lib/scripts/OMERO.ImageJ\")\nIMAGEJ_PATH=os.path.join(SCRIPT_PATH, \"resources/ImageJ/ImageJ-linux64\")\nXVFBRUN_PATH=os.path.join(SCRIPT_PATH,\"resources/scripts/xvfb-run\")\nPBS_GEN=os.path.join(SCRIPT_PATH,\"resources/scripts/pbsgen_tfmq.sh\")\nijms_path = os.path.join(SCRIPT_PATH,\"resources/macros\")\nMACRO_PATH = os.path.join(SCRIPT_PATH,\"resources/macros/stack_out.ijm\")\nMACRO_PATH2 = os.path.join(SCRIPT_PATH,\"resources/macros/weka_tfmq.ijm\")\n\ndef omero_ij(conn, scriptParams, uuid):\n\n user = conn.getUser()\n user = user.getName()\n\n # # GSCRATCH = \"/global/scratch2/sd/jcorrea\" #TODO: specify scratch per user\n # GSCRATCH = \"/global/scratch2/sd/%s\" % (user)\n # # cache_dir = os.path.join(GSCRATCH, \"ngbi/tmp\") #TODO: review cache dir\n # cache_dir = GSCRATCH\n #\n # OMERO_PATH=os.path.join(OMERO_HOME,\"bin/omero\")\n # SCRIPT_PATH=os.path.join(OMERO_HOME,\"lib/scripts/OMERO.ImageJ\")\n # 
IMAGEJ_PATH=os.path.join(SCRIPT_PATH, \"resources/ImageJ/ImageJ-linux64\")\n # XVFBRUN_PATH=os.path.join(SCRIPT_PATH,\"resources/scripts/xvfb-run\")\n # PBS_GEN=os.path.join(SCRIPT_PATH,\"resources/scripts/pbsgen_tfmq.sh\")\n # ijms_path = os.path.join(SCRIPT_PATH,\"resources/macros\")\n # MACRO_PATH = os.path.join(SCRIPT_PATH,\"resources/macros/stack_out.ijm\")\n # MACRO_PATH2 = os.path.join(SCRIPT_PATH,\"resources/macros/weka_tfmq.ijm\")\n\n # model = scriptParams[\"IJ_macro\"]\n ijm_path = scriptParams[\"IJ_macro\"]\n big_mem = scriptParams[\"Big_memory_nodes\"] #TODO: implement big memory nodes\n\n # model_path=model\n\n images, logMessage = script_utils.getObjects(conn, scriptParams)\n if not images:\n return None, None, logMessage\n imageIds = [i.getId() for i in images]\n\n for iId in imageIds:\n\n tmpdir_stack = tempfile.mkdtemp(dir=cache_dir)\n tmpdir_out = tempfile.mkdtemp(dir=cache_dir)\n\n image = conn.getObject(\"Image\", iId)\n dataset = image.getParent().getId()\n\n sizeZ = image.getSizeZ()\n # print(sizeZ)\n\n # job_liner=[]\n\n all_jobs = open(\"%s.job\" % (os.path.join(cache_dir, tmpdir_out.split('/')[-1])), 'w+')\n\n for z in range(sizeZ):\n plane = image.renderImage(z, 0)\n img_path = os.path.join(tmpdir_stack, \"plane_%02d.tiff\" % z)\n plane.save(img_path)\n\n ijmacro_args = \"%s*%s/*%s\" % (img_path, tmpdir_out, ijm_path)\n\n img_path2 = os.path.join(tmpdir_out, \"plane_%02d.tiff\" % z)\n #TODO: memory improvements\n # job_liner=(\"%s -a %s -Xmx2g -- -macro %s %s -batch:%s:0 \\n\" % (XVFBRUN_PATH, IMAGEJ_PATH, MACRO_PATH2, ijmacro_args, img_path2))\n job_liner=(\"%s -a %s -Xmx2g -- -macro %s %s -batch:%s:0 \\n\" % (XVFBRUN_PATH, IMAGEJ_PATH, ijm_path, ijmacro_args, img_path2))\n all_jobs.writelines(job_liner)\n\n all_jobs.close()\n\n system = scriptParams[\"System\"]\n wtime = scriptParams[\"Wall_time\"]\n pmem = scriptParams[\"Private_memory\"]\n\n pbs_file = \"%s.pbs\" % (os.path.join(cache_dir, tmpdir_out.split('/')[-1]))\n\n nodes = int(math.ceil(((2.00*sizeZ)+0.15*(2.00*sizeZ))/48))\n\n stack_args = \"%s/\" % (tmpdir_out)\n image_name = image.getName()\n qsub_cmd = \". %s %s %s %s %s %s %s %s %s %s %s %s > %s\" % (PBS_GEN, user, dataset, image_name, uuid, MACRO_PATH, stack_args, tmpdir_out, wtime, pmem, all_jobs.name, nodes, pbs_file)\n # qsub_cmd = \". 
%s %s %s %s %s %s %s %s %s %s %s %s > %s\" % (PBS_GEN, user, dataset, image_name, uuid, MACRO_PATH, stack_args, tmpdir_out, wtime, pmem, all_jobs.name, nodes, pbs_file) #TODO: re-factor arguments\n\n\n print(qsub_cmd)\n os.system(qsub_cmd)\n\n enableKeepAlive_time = (72*60*60)\n conn.c.enableKeepAlive(enableKeepAlive_time)\n os.system(\"ssh %s '%s %s'\" % (system, qsub_path, pbs_file))\n\n #TODO: NEWT will replace this\n\ndef runAsScript():\n\n dataTypes = [rstring('Image')]\n\n # ijm_path=os.path.join(SCRIPT_PATH, \"sample/classifiers/\") #TODO: define global vars\n # models_path=os.path.join(SCRIPT_PATH, \"sample/classifiers/\")\n\n # ijm_path = \"\"\n systems=['carver']\n\n ijMacros = []\n for file in os.listdir(ijms_path):\n if file.endswith(\".ijm\"):\n ijMacros.append(str(os.path.join(ijms_path,file)))\n print(ijMacros)\n\n client = scripts.client('ImageJ.py', \"\"\"Run an ImageJ macro\"\"\",\n\n scripts.String(\"Data_Type\", optional=False, grouping=\"1\",\n description=\"Pick Images by 'Image' ID or by the ID of their 'Dataset'\", values=dataTypes, default=\"Image\"),\n\n scripts.List(\"IDs\", optional=False, grouping=\"1.1\",\n description=\"List of Dataset IDs or Image IDs to process.\").ofType(rlong(0)),\n\n scripts.String(\"IJ_macro\", optional=False, grouping=\"2\",\n description=\"Select ImageJ macro\", values=ijMacros, default=ijMacros[0]),\n\n scripts.String(\"System\", optional=False, grouping=\"3\",\n description=\"Select the system\", values=systems, default=systems[0]),\n\n scripts.String(\"Wall_time\", grouping=\"3.1\",\n description=\"Wall time\", default='0:30:00'),\n\n scripts.String(\"Private_memory\", grouping=\"3.2\",\n description=\"Private memory\", default='4GB'),\n\n scripts.Bool(\"Big_memory_nodes\", grouping=\"3.2.1\",\n description=\"Big memory nodes\", default='False'),\n\n scripts.String(\"Nodes\", grouping=\"3.3\",\n description=\"Nodes\", default='1'),\n\n scripts.String(\"PPN\", grouping=\"3.4\",\n description=\"PPN\", default='5'),\n\n version = \"0\",\n authors = [\"Joaquin Correa\", \"Data and Analytics services\"],\n institutions = [\"National Energy Research Scientific Computing Center (NERSC)\"],\n contact = \"JoaquinCorrea@lbl.gov\",\n )\n\n try:\n session = client.getSession();\n\n scriptParams = {}\n for key in client.getInputKeys():\n if client.getInput(key):\n scriptParams[key] = client.getInput(key, unwrap=True)\n\n conn = BlitzGateway(client_obj=client)\n\n admin = conn.getAdminService()\n uuid = admin.getEventContext().sessionUuid\n omero_ij(conn, scriptParams, uuid) #TODO: include IJ methods\n\n finally:\n client.closeSession()\n\nif __name__ == \"__main__\":\n runAsScript()\n\n\n\n\n\n\n\n\n","sub_path":"ImageJ.py","file_name":"ImageJ.py","file_ext":"py","file_size_in_byte":6972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"400839074","text":"from string import printable\n\nimport pytest\n\nfrom . 
import huffmanfile\n\n\ndef test_compress_decompress():\n    data = b\"a\"*1000\n    c = huffmanfile.compress(data)\n    d = huffmanfile.decompress(c)\n    assert d == data\n\n\ndef test_decompress_corrupted():\n    with pytest.raises(huffmanfile.HuffmanError):\n        data = b'\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00'\n        huffmanfile.decompress(data)\n\n\ndef test_compress_incremental():\n    def gen_data(parts=10, partsize=1000):\n        for _ in range(parts):\n            yield b\"z\" * partsize\n\n    comp = huffmanfile.HuffmanCompressor()\n    out = bytes()\n    data = bytes()\n    for data_part in gen_data():\n        out += comp.compress(data_part)\n        data += data_part\n\n    out += comp.flush()\n    assert huffmanfile.decompress(out) == data\n\n\ndef test_write_file(tmp_path):\n    data = \"\"\"\\\n    Donec rhoncus quis sapien sit amet molestie. Fusce scelerisque vel augue\n    nec ullamcorper. Nam rutrum pretium placerat. Aliquam vel tristique lorem,\n    sit amet cursus ante. In interdum laoreet mi, sit amet ultrices purus\n    pulvinar a. Nam gravida euismod magna, non varius justo tincidunt feugiat.\n    Aliquam pharetra lacus non risus vehicula rutrum. Maecenas aliquam leo\n    felis. Pellentesque semper nunc sit amet nibh ullamcorper, ac elementum\n    dolor luctus. Curabitur lacinia mi ornare consectetur vestibulum.\"\"\"\n\n    filename = tmp_path / \"archive.hm\"\n\n    with huffmanfile.open(filename, \"wt\") as f:\n        f.write(data)\n    with huffmanfile.open(filename, \"rt\") as f:\n        content = f.read()\n\n    assert content == data\n","sub_path":"huffmanfile/huffmanfile_test.py","file_name":"huffmanfile_test.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"466370827","text":"from fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy.orm import Session\nfrom app import crud, models, schemas\nfrom app.database import SessionLocal, engine\nfrom typing import List\n\nrouter = APIRouter()\n\nmodels.Base.metadata.create_all(bind=engine)\n\n# Dependency\ndef get_db():\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()\n\n\n@router.get(\n    \"/points/team/{team}\",\n    response_model=List[schemas.PointEvent],\n    summary=\"point logs for a team\",\n)\ndef read_team(team: str, db: Session = Depends(get_db)):\n    db_team = crud.get_points_by_team(db, team=team)\n    if db_team is None:\n        raise HTTPException(status_code=404, detail=\"Team not found\")\n    return db_team\n\n\n@router.get(\n    \"/points/team/{team}/season/{season}\",\n    response_model=List[schemas.PointEvent],\n    summary=\"point logs for a specific team and season combination\",\n)\ndef read_team_and_season(team: str, season: str, db: Session = Depends(get_db)):\n    db_ts = crud.get_points_by_team_and_season(db, team=team, season=season)\n    if db_ts is None:\n        raise HTTPException(\n            status_code=404, detail=\"Team and Season combination not found\"\n        )\n    return db_ts\n","sub_path":"app/routers/teams.py","file_name":"teams.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"81595614","text":"#!/usr/bin/python3\n####################################################################################################\n\nimport re\nimport time\nfrom lib.ui import UI\nfrom lib.script import Script\nfrom lib.cli.facebook.openbmc import OpenBMC\n\nclass OpenBMC_0110(Script):\n    def __init__(self, dut, job_id=\"\", image_server = 'None'):\n        headline = ['Rebooting BMC']\n        purpose = ['To verify reboot of BMC is working properly.',\n                   'Run the 
\"reboot\" command on BMC\\'s console. This will reboot the BMC\\'s RTOS, COM-e module should not be affected.',\n 'Reboot Open BMC 100 times and check if it comes back up every single time.',\n 'It should be able to reach the login prompt.']\n\n self.__dut = dut[1]\n self.image_server = image_server\n super().__init__(headline, purpose, script_path=__file__,job_id=job_id)\n # Start logging the script.\n super().beginLog()\n\n def run(self):\n \"\"\"\n Function Name: run\n Purpose: Executes the steps defined by this test case.\n \"\"\"\n\n # initialize serial, Telnet and SSH UI with SystemMgmt APIs.\n self.__TELNET = super().initUI(self.__dut.telnet_credentials, self.__dut.platform, OpenBMC)\n self.__cycle = 10\n self.__sleep_time = 15\n self.__fail_count = 0\n\n # Do not surround assignment operator = with spaces in paranthesised expressions.\n # ==================================================================================================\n UI.log('STEP#01', 'Stop mTerm process then start a serial over LAN session.')\n self.__TELNET.send('sv stop mTerm\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('sol.sh\\r')\n self.__TELNET.expect('quit')\n self.__TELNET.send('\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n \n # ==================================================================================================\n UI.log('STEP#02', 'cd to path \"/usr/local/bin/\" in diag.')\n self.__TELNET.send('cd /usr/local/bin/\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n \n # ==================================================================================================\n UI.log('STEP#03', 'Press \"Ctrl+x\" to exit diag then reboot the openBMC.')\n self.__TELNET.send('\\x18')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n \n for i in range(1, self.__cycle + 1):\n # ==================================================================================================\n UI.log('STEP#04 - cycle#' + str(i) + '/' + str(self.__cycle), 'Check the prompt is still \"[root@minipack bin]#\" in COM-e when openBMC boots completed.')\n self.__TELNET.send('reboot\\r')\n \n \n while True:\n self.__TELNET.expect('OpenBMC Release')\n if 'login' in self.__TELNET.getBuff():\n break\n \n self.__TELNET.send('\\r')\n self.__TELNET.expect('login:')\n self.__TELNET.send(self.__dut.ssh_credentials[3] + '\\r')\n self.__TELNET.expect('Password:')\n self.__TELNET.send(self.__dut.ssh_credentials[4] + '\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('sv stop mTerm\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n \n # ==================================================================================================\n UI.log('STEP#05 - cycle#' + str(i) + '/' + str(self.__cycle), 'Repeat ' + str(i) + '/' + str(self.__cycle) + ' times to check the microserver should not be affected when the BMC is rebooting.')\n self.__TELNET.send('sol.sh\\r')\n self.__TELNET.expect('quit')\n self.__TELNET.send('\\r')\n \n try:\n self.__TELNET.expect(self.__dut.ssh_credentials[5], timeout=180)\n self.__TELNET.send('\\x18')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('log-util all --clear\\r')\n time.sleep(10)\n except:\n self.__fail_count += 1\n break\n \n if self.__fail_count == 0:\n self.__TELNET.send('sol.sh\\r')\n self.__TELNET.expect('quit')\n self.__TELNET.send('\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('cd\\r')\n 
self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('\\x18')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n \n UI.log('PASS', 'Cycle#' + str(i) + '/' + str(self.__cycle) + ': BMC_0110 Rebooting_BMC is passed.')\n else:\n self.__TELNET.send('\\x18')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('cat /mnt/data/logfile\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('hexdump /mnt/data/sel1.bin\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('bic-util scm --get_post_code\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n \n UI.log('FAIL', 'Cycle#' + str(i) + '/' + str(self.__cycle) + ': Hangs Error.')\n UI.log('FAIL', 'Cycle#' + str(i) + '/' + str(self.__cycle) + ': BMC_0110 Rebooting_BMC is failed.')\n\n def stop(self):\n # Terminate interfaces and restore settings.\n self.__TELNET.close()\n # Stop logging the script.\n super().endLog()","sub_path":"Facebook/openbmc_0110.py","file_name":"openbmc_0110.py","file_ext":"py","file_size_in_byte":5223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"37057869","text":"# Committee Checker\n#\n# Jason Brown (2017-08-11)\n\nexecfile('W:/qm/app/python/ipython_startup.py')\n\nos.chdir(r'N:\\Projects\\QMTools 0_0_9\\utils_to_add\\file_check_tools')\nfrom SharePoint_mod_date import SharePoint_mod_date\n\n\n\ndef doc_check(url):\n from SharePoint_mod_date import SharePoint_mod_date\n check_value = SharePoint_mod_date(url).strftime('%Y-%m-%d %H:%M')\n if check_value == '1999-10-01 00:00':\n return '---'\n else:\n return check_value\n\ndef minutes_filename(c,y,m):\n import datetime as dt\n return '-'.join([c,'minutes',dt.datetime(y,m,1).strftime('%Y-%m')])+'.docx'\n\n\n\nSProot = 'https://vaww.visn1.portal.va.gov/boston/'\n\nprefix = 'cos/committees/'\n\ncommittees = {'ic' : 'Infection Control'\n,'cancer': 'Cancer Committee'\n,'pain' : 'Pain Management Steering Committee'\n,'qic' : 'Quality Improvement Committee'}\n\nfor c in committees:\n for y in [2016]:\n for m in [10,11,12]:\n minutes = SProot+prefix+c+'/minutes/'+minutes_filename(c,y,m)\n print (c, m, minutes_filename(c,y,m), doc_check(minutes))\n\n for y in [2017]:\n for m in [1,2,3,4,5,6,7,8,9]:\n minutes = SProot+prefix+c+'/minutes/'+minutes_filename(c,y,m)\n print (c, m, minutes_filename(c,y,m), doc_check(minutes))\n\n\n\n\n\nprefix = 'cos/admincom/'\n\ncommittees = {'governing' : 'Governing Board'\n,'aeb' : 'Administrative Executive Board'}\n\nfor c in committees:\n for y in [2016]:\n for m in [10,11,12]:\n minutes = SProot+prefix+c+'/minutes/'+minutes_filename(c,y,m)\n print (c, m, minutes_filename(c,y,m), doc_check(minutes))\n\n for y in [2017]:\n for m in [1,2,3,4,5,6,7,8,9]:\n minutes = SProot+prefix+c+'/minutes/'+minutes_filename(c,y,m)\n print (c, m, minutes_filename(c,y,m), doc_check(minutes))\n","sub_path":"utils_to_add/file_check_tools/Committee_Checker.py","file_name":"Committee_Checker.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"459823859","text":"import datetime\n\nimport emoji\nimport requests\n\nfrom core.plugins import Logger\n\nAPPID = \"1a03c4777113bed67e634d12d2c7e739\"\n\n\nclass CityNotFound(Exception):\n pass\n\n\nclass WeatherParser(object):\n BASE_URL = 'http://api.openweathermap.org/data/2.5/'\n FORECAST_URL = BASE_URL + 'forecast'\n WEATHER_URL = BASE_URL + 'weather'\n FIND_URL = BASE_URL 
+ 'find'\n    WIND_DIRECTION_LIST = [\n        'Северный',\n        'Северо-Восточный',\n        'Восточный',\n        'Юго-Восточный',\n        'Южный',\n        'Юго-Западный',\n        'Западный',\n        'Северо-Западный',\n    ]\n    OUTPUT_TEXT = \":small_blue_diamond: Дата: {date}\\n\" \\\n                  \":small_blue_diamond: Температура: {temp}, {description}\\n\" \\\n                  \":small_blue_diamond: Макс. температура: {max_temp}\\n\" \\\n                  \":small_blue_diamond: Мин. температура: {min_temp}\\n\" \\\n                  \":small_blue_diamond: Ветер: {wind_direction}, {wind_speed} м/с\"\n\n    ERROR_MESSAGE = \"упс, не могу пока тебе помочь с этим: возникли неполадки :heavy_exclamation_mark:.\"\n\n    def __get_wind_direction(self, angle):\n        result = self.WIND_DIRECTION_LIST[0]\n        for i in range(8):  # one 45-degree sector per compass direction\n            step = 45.0\n            min_angle = i * step - 45 / 2.0\n            max_angle = i * step + 45 / 2.0\n            if i == 0 and angle > 360 - 45 / 2.0:\n                angle -= 360\n            if min_angle <= angle <= max_angle:\n                result = self.WIND_DIRECTION_LIST[i]\n                break\n        return result\n\n    def get_weather_by_city_name(self, city_name):\n        try:\n            result = requests.get(\n                self.FIND_URL,\n                params={\n                    'q': city_name,\n                    'type': 'like',\n                    'units': 'metric',\n                    'lang': 'ru',\n                    'APPID': APPID\n                }\n            )\n            data = result.json()\n\n            if not data or not data['list']:\n                raise CityNotFound(city_name)\n            else:\n                return data['list'][0]\n        except Exception as error_msg:\n            Logger.log('[CWeatherParser:get_weather_by_city_name] %s' % error_msg)\n            return emoji.emojize(self.ERROR_MESSAGE, use_aliases=True)\n\n    def get_weather_by_city_id(self, city_id):\n        try:\n            res = requests.get(\n                self.WEATHER_URL,\n                params={\n                    'id': city_id,\n                    'units': 'metric',\n                    'lang': 'ru',\n                    'APPID': APPID\n                }\n            )\n            data = res.json()\n            return self.dump(data)\n        except Exception as error_msg:\n            Logger.log('[CWeatherParser:get_weather_by_city_id] %s' % error_msg)\n            return emoji.emojize(self.ERROR_MESSAGE, use_aliases=True)\n\n    def get_forecast_by_city_id(self, city_id):\n        try:\n            res = requests.get(\n                self.FORECAST_URL,\n                params={\n                    'id': city_id,\n                    'units': 'metric',\n                    'lang': 'ru',\n                    'APPID': APPID\n                }\n            )\n            data = res.json()\n            print('city:', data['city']['name'], data['city']['country'])\n            return '\\n\\n'.join(map(self.dump, data['list']))\n        except Exception as error_msg:\n            Logger.log('[CWeatherParser:get_forecast_by_city_id] %s' % error_msg)\n            return emoji.emojize(self.ERROR_MESSAGE, use_aliases=True)\n\n    def dump(self, data):\n        return emoji.emojize(\n            self.OUTPUT_TEXT.format(\n                date=data.get('dt_txt', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))[:16],\n                temp='{0:+3.0f}'.format(data['main']['temp']),\n                description=data['weather'][0]['description'],\n                max_temp='{0:+3.0f}'.format(data['main']['temp_max']),\n                min_temp='{0:+3.0f}'.format(data['main']['temp_min']),\n                wind_direction=self.__get_wind_direction(data['wind']['deg']),\n                wind_speed='{0:2.0f}'.format(data['wind']['speed']),\n            ),\n            use_aliases=True\n        )\n\n\nif __name__ == '__main__':\n    weather = WeatherParser()\n    city = weather.get_weather_by_city_name('пушкин,ru')\n\n    print('>> SEARCH:\\n')\n    print(weather.dump(city))\n    print('\\n>> DAYS:\\n')\n    print(weather.get_forecast_by_city_id(city['id']))\n    print('\\n>> CURRENT WEATHER:\\n')\n    print(weather.get_weather_by_city_id(city['id']))\n","sub_path":"core/plugins/botweather/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"202525039","text":"# -*- coding: utf-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup\nimport json,re\n\ndef get_page(url):\n    r = 
requests.get(url)\n    soup = BeautifulSoup(r.content,'html.parser')\n    showsList = []\n    for link in soup.find('div',{'class':'panel red'}).findAll('span',{'class':'text'}):\n        try:\n            showsList.append({'title':link.text,'url':link.parent.get('href'),'image':link.parent.img.get('src')}) \n        except:\n            continue\n    return showsList\n\ndef getEpisode(url):\n    r = requests.get(url)\n    soup = BeautifulSoup(r.text,'html.parser')\n    dvi = soup.find('video').get('data-video-id')\n    da = soup.find('video').get('data-account')\n\n    return getVideo(da,dvi)\n\ndef getVideo(da,dvi):\n    \n    headers = {\n        'Accept': 'application/json;pk=BCpkADawqM2Iex_b1WSK2quDwI8rzeJpxe1cA0RwpDEY17exErxs1Adnvf7j-PKUj9FI8tihvMonKjBIBcBGLij2stKlQW241mZpYKa4d9L9lrmao59EDzbVbx6NGYkc-Zay3zWMPGdrOo-i'\n    }\n    \n    html = requests.get('https://edge.api.brightcove.com/playback/v1/accounts/'+da+'/videos/'+dvi,headers=headers)\n\n    a = json.loads(html.content)\n    try:\n        return a['sources'][1]['src']\n\n    except:\n        pass","sub_path":"plugin.video.hop/hop.py","file_name":"hop.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"506065539","text":"import collections\nfrom flask import render_template, flash, redirect, url_for, abort, json\nfrom flask.ext.login import login_user, current_user, logout_user\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom ..models import User, Student, Point, Warn, InfractionType, PointsRemovedHistory\nfrom .forms import LoginForm, PointsForm, PasswordChangeForm, AddStudentForm, RemoveStudentForm, SearchPointsForm, RewardForm\nfrom . import main\nfrom .. import login_manager, db, mail\nfrom config import RESULTS_PER_PAGE\nfrom config import DevelopmentConfig\nimport datetime\nfrom sqlalchemy import or_\nfrom itsdangerous import URLSafeSerializer\nfrom flask_mail import Message\n\n\n@main.app_errorhandler(404)\ndef not_found(error):\n    return render_template('404.html'), 404\n\n\n@main.route('/acknowledge/<token>')\ndef confirm_punish(token):\n    try:\n        point = confirm_token(token)\n        if point:\n            point.acknowledged = True\n            db.session.commit()\n            flash(\"Thank you for acknowledging your points.\", \"success\")\n    except:\n        flash(\"The acknowledgement link is invalid or has expired.\", \"danger\")\n    return render_template(\"ack.html\")\n\n\n@main.route('/')\ndef index():\n    if not current_user.is_authenticated:\n        return redirect(url_for('.login'))\n    return render_template('index.html')\n\n\n@main.route('/login', methods=['GET', 'POST'])\ndef login():\n    error = None\n    if current_user is not None and current_user.is_authenticated:\n        return redirect(url_for('.index'))\n    form = LoginForm()\n    if form.validate_on_submit():\n        user = User.query.get(form.username.data)\n        if user:\n            approved = user.approved\n            if not approved:\n                error = \"You do not have access to this application.\"\n                return render_template(\"login.html\", form=form, error=error)\n            pwhash = user.password\n            password = form.password.data\n            if check_password_hash(pwhash, password):\n                user.authenticated = True\n                db.session.add(user)\n                db.session.commit()\n                login_user(user, remember=True)\n                return redirect(url_for('.index'))\n            else:\n                error = \"Your password is incorrect. Please try again.\"\n        else:\n            error = \"Sorry. 
\" + form.username.data + \" is not a valid username.\"\n return render_template(\"login.html\", form=form, error=error)\n\n\n@main.route('/logout')\ndef logout():\n if not current_user.is_authenticated:\n return redirect(url_for('.login'))\n user = current_user\n user.authenticated = False\n db.session.add(user)\n db.session.commit()\n logout_user()\n return redirect(url_for('.login'))\n\n\n@main.route('/students')\ndef studentlist():\n if not current_user.is_authenticated:\n return redirect(url_for('.login'))\n students = Student.query.filter_by(active=True).order_by(Student.lname)\n return render_template(\"studentlist.html\", students=students)\n\n\n@main.route('/students/', methods=['GET', 'POST'])\ndef student(pawprint):\n if not current_user.is_authenticated:\n return redirect(url_for('.login'))\n student = Student.query.filter_by(pawprint=pawprint, active=True).first()\n if student is None:\n abort(404)\n now = datetime.datetime.today()\n return render_template('student.html', student=student, time=now)\n\n\n@main.route('/students//give-points', methods=['GET', 'POST'])\ndef givepointspage(pawprint):\n if not current_user.is_authenticated:\n return redirect(url_for('.login'))\n student = Student.query.filter_by(pawprint=pawprint, active=True).first()\n if student is None:\n abort(404)\n form = PointsForm()\n infractiontypes = InfractionType.query.all()\n infraction_names = []\n for infraction in infractiontypes:\n infraction_names.append(infraction.description)\n infraction_choices = list(enumerate(infraction_names, 1))\n form.pointsField.choices = infraction_choices\n if form.validate_on_submit():\n try:\n do_punish(form, pawprint, student)\n return redirect(url_for('.student', pawprint=pawprint))\n except Exception as e:\n abort(500)\n now = datetime.datetime.today()\n return render_template('doPunish.html', student=student, time=now, form=form)\n\n\n@main.route('/students//remove-points', methods=['GET', 'POST'])\ndef rewardpage(pawprint):\n if not current_user.is_authenticated:\n return redirect(url_for('.login'))\n student = Student.query.filter_by(pawprint=pawprint, active=True).first()\n if student is None:\n abort(404)\n form = RewardForm()\n if form.validate_on_submit():\n try:\n point_total = student.pointTotal\n points_to_remove = float(form.removePointsField.data)\n new_point_total = point_total - points_to_remove\n if new_point_total < 0:\n flash(\"You can not lower a student's point total into the negatives.\", \"warning\")\n else:\n now = datetime.datetime.today()\n new_point_removal_log_entry = PointsRemovedHistory(points_to_remove, form.whyField.data,\n now, current_user.username, pawprint)\n db.session.add(new_point_removal_log_entry)\n student.pointTotal = new_point_total\n db.session.commit()\n flash(\"Points removed.\", 'success')\n return redirect(url_for('.student', pawprint=pawprint))\n except Exception as e:\n abort(500)\n return render_template('doReward.html', student=student, form=form)\n\n\n@main.route('/profile/')\ndef profile(username):\n if not current_user.is_authenticated:\n return redirect(url_for('.login'))\n if username != current_user.username:\n return redirect(url_for('.index'))\n user = User.query.get(username)\n return render_template('profile.html', user=user)\n\n\n@main.route('/profile//password-change', methods=['GET', 'POST'])\ndef change_password(username):\n if not current_user.is_authenticated:\n return redirect(url_for('.login'))\n if username != current_user.username:\n return redirect(url_for('.index'))\n user = 
User.query.get(username)\n    form = PasswordChangeForm()\n    if form.validate_on_submit():\n        try:\n            if check_password_hash(user.password, form.currentPassword.data):\n                newPassword = generate_password_hash(form.confirm.data)\n                user.password = newPassword\n                db.session.commit()\n                return redirect(url_for('.logout'))\n        except Exception as e:\n            abort(500)\n    flash(\"You will be asked to log in again if your password change is successful\", 'danger')\n    return render_template('passChange.html', user=user, form=form)\n\n\n@main.route('/profile/<username>/add-student', methods=['GET', 'POST'])\ndef add_student(username):\n    if not current_user.is_authenticated:\n        return redirect(url_for('.login'))\n    if username != current_user.username:\n        return redirect(url_for('.index'))\n    form = AddStudentForm()\n    if form.validate_on_submit():\n        try:\n            pawprint = form.pawprintField.data\n            student_to_be_added = Student.query.get(pawprint)\n            if student_to_be_added:\n                if student_to_be_added.active is False:\n                    student_to_be_added.active = True\n                    points = Point.query.filter_by(student_id=pawprint).all()\n                    for point in points:\n                        point.active = True\n                    warns = Warn.query.filter_by(student_id=pawprint).all()\n                    for warn in warns:\n                        warn.active = True\n                    db.session.commit()\n                    flash(\"We found a student with that pawprint already in the system, but marked as inactive. We have reactivated this student.\")\n                else:\n                    flash(\"Student is already in system and active. No action taken.\")\n            else:\n                first_name = form.firstNameField.data\n                last_name = form.lastNameField.data\n                newStudent = Student(pawprint, first_name, last_name)\n                db.session.add(newStudent)\n                db.session.commit()\n                flash(\"Student added successfully.\", 'success')\n            return redirect(url_for(\".profile\", username=current_user.username))\n        except Exception as e:\n            abort(500)\n    return render_template('addStudent.html', form=form)\n\n\n@main.route('/profile/<username>/remove-student', methods=['GET', 'POST'])\ndef remove_student(username):\n    if not current_user.is_authenticated:\n        return redirect(url_for('.login'))\n    if username != current_user.username:\n        return redirect(url_for('.index'))\n    form = RemoveStudentForm()\n    pawprints = db.session.query(Student.pawprint).all()\n    p = []\n    for pawprint in pawprints:\n        p.append(pawprint.pawprint)\n    p = json.dumps(p)\n    if form.validate_on_submit():\n        try:\n            pawprint = form.pawprintField.data\n            toDelete = Student.query.get(pawprint)\n            if toDelete:\n                points = Point.query.filter_by(student_id=pawprint).all()\n                for point in points:\n                    point.active = False\n                warns = Warn.query.filter_by(student_id=pawprint).all()\n                for warn in warns:\n                    warn.active = False\n                toDelete.active = False\n                db.session.commit()\n                flash(\"Student removed successfully.\", 'success')\n            else:\n                flash(\"Could not find student. 
Please verify that pawprint is correct.\", \"warning\")\n            return redirect(url_for(\".profile\", username=current_user.username))\n        except Exception as e:\n            abort(500)\n    return render_template('removeStudent.html', form=form, student_pawprints=p)\n\n\n@main.route('/points', methods=['GET', 'POST'])\n@main.route('/points/<int:page>', methods=['GET', 'POST'])\ndef points(page=1):\n    if not current_user.is_authenticated:\n        return redirect(url_for('.login'))\n    points = Point.query.filter_by(active=True).order_by(Point.when.desc()).paginate(page, RESULTS_PER_PAGE, False)\n    students = Student.query.filter_by(active=True).order_by(Student.lname)\n    form = SearchPointsForm()\n    if form.validate_on_submit():\n        query = form.pointsSearchField.data\n        results = Point.query.filter(or_(Point.why.ilike('%'+query+'%'), Point.type.ilike('%'+query+'%'),\n                                         Point.student_id.ilike('%'+query+'%'), Point.issuer_id.ilike('%'+query+'%'),\n                                         Point.supervisor.ilike('%'+query+'%')), Point.active == True)\n        return render_template(\"points.html\", query=query, results=results, students=students, form=form)\n    return render_template(\"points.html\", points=points, students=students, form=form)\n\n\n@main.route('/warnings', methods=['GET', 'POST'])\n@main.route('/warnings/<int:page>', methods=['GET', 'POST'])\ndef warnings(page=1):\n    if not current_user.is_authenticated:\n        return redirect(url_for('.login'))\n    warns = Warn.query.order_by(Warn.when.desc()).paginate(page, RESULTS_PER_PAGE, False)\n    students = Student.query.filter_by(active=True).order_by(Student.lname)\n    form = SearchPointsForm()\n    if form.validate_on_submit():\n        query = form.pointsSearchField.data\n        results = Warn.query.filter(or_(Warn.why.ilike('%' + query + '%'), Warn.type.ilike('%' + query + '%'),\n                                        Warn.student_id.ilike('%' + query + '%'),\n                                        Warn.issuer_id.ilike('%' + query + '%'),\n                                        Warn.supervisor.ilike('%' + query + '%')))\n        return render_template(\"points.html\", query=query, results=results, students=students, form=form)\n    return render_template(\"warnings.html\", warns=warns, students=students, form=form)\n\n\n@main.route('/analytics')\ndef analytics(chartID='chart_ID', chart_type='line', chart_height=500):\n    if not current_user.is_authenticated:\n        return redirect(url_for('.login'))\n    results = db.session.query(Point.when).add_column(Point.amount).all()\n    timeArray = []\n    amountArray = []\n    d = {}\n    for result in results:\n        time = result.when\n        if time in d:\n            amount = d[time] + result.amount\n            d[time] = amount\n        else:\n            d[time] = result.amount\n    od = collections.OrderedDict(sorted(d.items()))\n    for key, value in od.items():\n        t = json.dumps(key.isoformat())\n        timeArray.append(t)\n        amountArray.append(value)\n    chart = {\"renderTo\": chartID, \"type\": chart_type, \"height\": chart_height}\n    title = {\"text\": 'Points Assigned By Day'}\n    xAxis = {\"categories\": timeArray}\n    yAxis = {\"title\": {\"text\": 'Points'}, 'plotlines': [{'value': 0, 'width': 1, 'color': '#808080'}]}\n    series = [{\"name\": 'Amount', \"data\": amountArray}]\n    return render_template('analytics.html', chartID=chartID, chart=chart, series=series, title=title, xAxis=xAxis,\n                           yAxis=yAxis)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n    return User.query.get(user_id)\n\n\ndef do_punish(form, pawprint, student):\n    if form.warning.data is True:\n        try:\n            give_warnings(form, pawprint)\n        except Exception as e:\n            raise e\n    else:\n        try:\n            give_points(form, pawprint, student)\n        except Exception as e:\n            raise e\n\n\ndef give_points(form, pawprint, student):\n    try:\n        infraction = 
InfractionType.query.get(form.pointsField.data)\n typeOf = infraction.description\n if form.customAmountField.data != '':\n amount = form.customAmountField.data\n else:\n amount = infraction.value\n newpts = Point(amount, typeOf, form.whyField.data, form.whenField.data,\n form.supervisorField.data, current_user.username, pawprint)\n student.pointTotal += float(amount)\n #token = generate_confirmation_token(newpts)\n #confirm_url = url_for('confirm_punish', token=token, _external=True)\n #html = render_template('email.html', confirm_url=confirm_url, type=newpts.typeOf, amount=newpts.amount)\n #subject = \"You have been given points. Please acknowledge.\"\n #send_email(pawprint+\"@mail.missouri.edu\", subject, html)\n db.session.add(newpts)\n db.session.commit()\n flash(\"Points issued.\", 'success')\n except Exception as e:\n raise Exception('Something went wrong: ' + str(e))\n\n\ndef give_warnings(form, pawprint):\n try:\n infraction = InfractionType.query.get(form.pointsField.data)\n warnType = infraction.description\n newWarning = Warn(warnType, form.whyField.data, form.whenField.data,\n form.supervisorField.data, current_user.username, pawprint)\n db.session.add(newWarning)\n db.session.commit()\n flash(\"Warning issued.\", 'success')\n except Exception as e:\n raise Exception('Something went wrong: ' + str(e))\n\n\ndef generate_confirmation_token(id):\n serializer = URLSafeSerializer(DevelopmentConfig.SECRET_KEY)\n return serializer.dumps(id, salt=DevelopmentConfig.SECURITY_PASSWORD_SALT)\n\n\ndef confirm_token(token):\n serializer = URLSafeSerializer(DevelopmentConfig.SECRET_KEY)\n try:\n point = serializer.loads(token, salt=DevelopmentConfig.SECURITY_PASSWORD_SALT)\n except:\n return False\n return point\n\n\ndef send_email(to, subject, template):\n msg = Message(subject, recipients=[to], html=template, sender=DevelopmentConfig.MAIL_DEFAULT_SENDER)\n mail.send(msg)\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"503879907","text":"import logging\nimport codecs\nfrom bs4 import BeautifulSoup\n\n\n\ndef read_langs(file_name):\n\n logging.info((\"Reading lines from {}\".format(file_name)))\n total_data=[]\n\n with codecs.open(file_name, \"r\", \"utf-8\") as file:\n\n data = file.read()\n # data = data[0:2116]\n soup = BeautifulSoup(data, 'html.parser')\n results = soup.find_all('sentence')\n for item in results:\n\n text = item.find(\"text\").text.strip()\n mistakes = item.find_all(\"mistake\")\n\n error_informations = []\n for mistake in mistakes:\n location = mistake.find(\"location\").text.strip()\n wrong = mistake.find(\"wrong\").text.strip()\n right = mistake.find(\"correction\").text.strip()\n error_infor_temp = []\n error_infor_temp.append(location)\n error_infor_temp.append(',')\n error_infor_temp.append(wrong)\n error_infor_temp.append(',')\n error_infor_temp.append(right)\n error_infor_temp.append(';')\n error_informations.append(error_infor_temp)\n if text[int(location)-1] != wrong:\n print(\"The character of the given location does not equal to the real character\")\n sentence = text\n if len(sentence) < 511:\n with open('data/Auto_Gener_Data/train.txt', 'a', encoding='utf-8') as f:\n f.write(sentence)\n f.write(\"\\n\")\n for i in range(0, len(error_informations)):\n f.writelines(error_informations[i])\n f.write(\"\\n\")\n\n\n\n# ----------Sentence-Level don't need tag---------\n# sen = list(text)\n# tags = [\"0\" for _ 
in range(len(sen))]\n# for i in locations:\n# tags[i - 1] = \"1\"\n\n# total_data.append([\" \".join(sentence), \" \".join(tags)])\n\n return total_data","sub_path":"Automatic-Corpus_Filter_Data_Aug/Discriminator-binary/utils_2.py","file_name":"utils_2.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"590832840","text":"# -*- coding: utf-8 -*-\nfrom geofabric.model.contractedcatchment import ContractedCatchment\nfrom geofabric.view.ldapi import GEOFClassRenderer, SchemaOrgRendererMixin\nimport geofabric._config as config\n\n\nclass CatchmentRenderer(SchemaOrgRendererMixin, GEOFClassRenderer):\n GEOF_CLASS = config.URI_CATCHMENT_CLASS\n\n def __init__(self, request, identifier, views, *args,\n default_view_token=None, **kwargs):\n _views = views or {}\n _uri = ''.join([config.URI_CONTRACTED_CATCHMENT_INSTANCE_BASE, identifier])\n kwargs.setdefault('geof_template', 'class_contractedcatchment.html')\n kwargs.setdefault('hyf_template', 'class_contractedcatchment.html')\n super(CatchmentRenderer, self).__init__(\n request, _uri, _views, *args,\n default_view_token=default_view_token, **kwargs)\n self.identifier = identifier\n if self.view == \"alternates\":\n self.instance = None\n else:\n self.instance = ContractedCatchment(self.identifier)\n","sub_path":"geofabric/view/ldapi/catchment.py","file_name":"catchment.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"405408096","text":"\"\"\"\nChapter 2,Exercise 7\n\"\"\"\n\nfrom common import simpledatastructures, customexceptions\n\n\ndef is_palindrome(singlyroot):\n \"\"\"\n Checks if the singly linked list starting at singlyroot\n represents a palindrome.\n\n Args:\n singlyroot: The head of the singly linked list.\n\n Returns:\n A True if the list represents a palindrome. 
False otherwise.\n\n Raises:\n A customexceptions.InvalidArgumentException if the singlyroot\n is not of the correct type.\n \"\"\"\n\n if not singlyroot:\n return True\n\n if not isinstance(singlyroot, simpledatastructures.SinglyNode):\n raise customexceptions.InvalidArgumentException(\"singlyroot is not of a valid type\")\n\n if not singlyroot.next:\n return True\n\n # Find length of the list - O(n)\n list_len = len(list(singlyroot.nodes()))\n midpoint = ((list_len - 1) // 2) + 1\n\n result = check_palindrome(singlyroot, midpoint, list_len % 2 == 0)\n return result['check']\n\n\ndef check_palindrome(singlynode, level, is_even_len):\n\n assert level > 0\n\n result = {}\n\n # Base case\n if level == 1:\n if is_even_len:\n result['check'] = singlynode.data == singlynode.next.data\n result['partnernode'] = singlynode.next.next\n else:\n result['check'] = True\n result['partnernode'] = singlynode.next\n else:\n result = check_palindrome(singlynode.next, level-1, is_even_len)\n result['check'] = result['check'] and (singlynode.data == result['partnernode'].data)\n result['partnernode'] = result['partnernode'].next\n\n return result\n\n\n \n ","sub_path":"chapter2/exercise7.py","file_name":"exercise7.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"525577019","text":"# consume an API end-point using RxPY\r\nimport requests # pip install requests\r\nimport rx # pip install rx\r\nimport json\r\nfrom rx import operators as ops\r\n\r\n# access this API end-point: https://jsonplaceholder.typicode.com/users\r\ndef filternames(x, l):\r\n if(x['name'].startswith(l)): # later make use of the chosen letter\r\n return x['name']\r\n else:\r\n return ''\r\n\r\ndef main():\r\n # ask the user which letter they want\r\n letter = input('Which letter? 
')\r\n content = requests.get('https://jsonplaceholder.typicode.com/users')\r\n y = json.loads(content.text)\r\n # use rx to make this an observable\r\n source = rx.from_(y)\r\n # use our observable\r\n case1 = source.pipe(\r\n ops.filter(lambda c: filternames(c, letter)), \r\n ops.map(lambda a: a['name'])\r\n )\r\n # wire up the subscription\r\n case1.subscribe(\r\n on_next = lambda i: print('Received {}'.format(i)),\r\n on_error = lambda e: print('Reeceived Error: {}'.format(e)),\r\n on_completed = lambda: print('All done')\r\n )\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"data/beyondAdvancedPythonApril2021-main/my_rxpy.py","file_name":"my_rxpy.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"350524892","text":"\n\n# Credit goes to https://bitbucket.org/spookylukey/django-fabfile-starter/src\n\nimport os\nimport datetime as dt\nfrom io import StringIO\nimport json\n\nimport posixpath\nimport fabric\nimport requests\n\nfrom fabsettings import (USER, HOST, DJANGO_APP_NAME,\n DJANGO_APPS_DIR, LOGS_ROOT_DIR,\n APP_PORT, GUNICORN_WORKERS, DJANGO_PROJECT_NAME,\n STAGING_APP_PORT)\n\n\ndef upload_template(c, filename, destination, context=None, template_dir=None):\n \"\"\"\n Render and upload a template text file to a remote host.\n \"\"\"\n\n text = None\n template_dir = template_dir or os.getcwd()\n from jinja2 import Environment, FileSystemLoader\n jenv = Environment(loader=FileSystemLoader(template_dir))\n context = context if context is not None else {}\n text = jenv.get_template(filename).render(**context)\n # Force to a byte representation of Unicode, or str()ification\n # within Paramiko's SFTP machinery may cause decode issues for\n # truly non-ASCII characters.\n # text = text.encode('utf-8')\n\n # Upload the file.\n return c.put(\n StringIO(text),\n destination,\n )\n\n\ndef venv(c):\n \"\"\"\n Runs a command in a virtualenv (which has been specified using\n the virtualenv context manager\n \"\"\"\n return c.prefix(\"source {}/bin/activate\".format(c.config.bgtools.VENV_DIR))\n\n\ndef install_dependencies(c):\n ensure_virtualenv(c)\n with venv(c), c.cd(c.config.bgtools.SRC_DIR):\n c.run(\"pip install -U -r requirements.txt\")\n\n\ndef file_exists(c, path):\n print('checking existence of: {}: {}'.format(path, bool(c.run('stat {}'.format(path), hide=True, warn=True))))\n return c.run('stat {}'.format(path), hide=True, warn=True).ok\n\n\ndef ensure_virtualenv(c):\n args = c.config.bgtools\n ensure_dir(c, args.SRC_DIR)\n if file_exists(c, args.VENV_DIR):\n return\n\n with c.cd(args.DJANGO_APP_ROOT):\n c.run(\"virtualenv --no-site-packages --python={} {}\".format(\n args.PYTHON_BIN, args.venv_subdir))\n c.run(\"echo {} > {}/lib/{}/site-packages/projectsource.pth\".format(\n args.SRC_DIR, args.venv_subdir, args.PYTHON_BIN))\n\n\ndef ensure_dir(c, d):\n print('checking existence of {} on {}'.format(d, c))\n if not file_exists(c, d):\n # note that the parent directory needs to already exist, usually by making a custom app\n # with the correct name in the webfaction control panel\n print('making {}'.format(d))\n c.run(\"mkdir -p {}\".format(d))\n\n\ndef copy_settings(c):\n args = c.config.bgtools\n with c.cd(args.LOCAL_DIR):\n fname = 'settings_{}.py'.format(args.mode)\n c.local('cp {} bgtools/bgtools/private_settings.py'.format(fname))\n c.local('echo STAGING={} >> bgtools/bgtools/private_settings.py'.format('True' if args.staging else False))\n\n\ndef rsync(c, src, dest):\n args 
= c.config.bgtools\n c.local('rsync -avz {} {}:{}'.format(src,\n args.host,\n dest))\n\n\ndef rsync_source(c):\n \"\"\"\n rsync the source over to the server\n \"\"\"\n args = c.config.bgtools\n rsync(c, os.path.join(args.LOCAL_DIR, 'bgtools'), args.DJANGO_APP_ROOT)\n\n\ndef collect_static(c):\n \"\"\"\n Collect django static content on server\n \"\"\"\n with venv(c), c.cd(c.config.bgtools.SRC_DIR):\n c.run('python manage.py collectstatic --no-input')\n\n\ndef checkout_and_install_libs(c):\n args = c.config.bgtools\n libs = json.load(open('libs.json'))\n ensure_dir(c, args.CHECKOUT_DIR)\n with c.cd(args.CHECKOUT_DIR):\n for lib, params in libs.items():\n print('handling ' + lib)\n libdir = params['repo']\n if libdir != 'local':\n params['branch'] = args.branch\n else:\n with c.cd(args.LOCAL_DIR):\n rsync(c, posixpath.join(params['path'], params['name']),\n args.CHECKOUT_DIR)\n with c.cd(params['name']), venv(c):\n c.run('pip install -U .')\n continue\n github_url = 'https://github.com/{}/{}'.format(params['owner'], params['repo'])\n if not file_exists(c, libdir):\n c.run('git clone {}.git'.format(github_url))\n with c.cd(libdir):\n c.run('git fetch origin')\n if args.mode == 'debug' or args.tag == 'head':\n c.run('git checkout {}'.format(params['branch']))\n c.run('git pull')\n version = c.run('git rev-parse {}'.format(params['branch'])).stdout\n version_url = '{}/commits/{}'.format(github_url, version)\n elif args.mode == 'release':\n tag = args.tag\n if tag == 'latest':\n tag = c.run('git tag -l \"v*\" --sort=-v:refname').stdout.split()[0]\n c.run('git checkout {}'.format(tag))\n version = tag\n version_url = '{}/releases/tag/{}'.format(github_url, tag)\n for src, target in params.get('extras', []):\n with c.cd(args.LOCAL_DIR):\n rsync(c, posixpath.join(args.LOCAL_DIR, 'extras', lib, src),\n posixpath.join(args.CHECKOUT_DIR, libdir, target))\n with venv(c):\n c.run('pip install -U .')\n with c.cd(args.SRC_DIR):\n r = requests.get('https://api.github.com/repos/{}/{}/releases'.format(params['owner'],\n params['repo']))\n changelog = r.json()\n changelog = [{'url': ch['html_url'],\n 'date': dt.datetime.strptime(ch['published_at'][:10], '%Y-%m-%d').date(),\n 'name': ch['name'],\n 'tag': ch['tag_name'],\n 'description': ch['body']}\n for ch in changelog]\n for tname, context in [('version', {'version': version, 'url': version_url}),\n ('changelog', {'changelog': changelog})]:\n print('uploading {}_{}.html'.format(lib, tname))\n upload_template(c, '{}_template.html'.format(tname),\n posixpath.join(args.SRC_DIR,\n DJANGO_APP_NAME,\n 'templates',\n DJANGO_APP_NAME,\n '{}_{}.html'.format(lib, tname)),\n context=context,\n template_dir=posixpath.join(args.LOCAL_DIR, 'templates'))\n\n\n@fabric.task\ndef stop_webserver(c, mode='debug', tag='latest', staging=True, branch='master'):\n \"\"\"\n Stop the webserver that is running the Django instance\n \"\"\"\n populate_args(c, mode=mode, tag=tag, staging=staging, branch=branch)\n c.run(\"kill $(cat {})\".format(c.config.bgtools.GUNICORN_PIDFILE))\n\n\ndef _webserver_command(c):\n args = c.config.bgtools\n return ('{venv_dir}/bin/gunicorn '\n '--error-logfile={error_logfile} '\n '--access-logfile={access_logfile} '\n '--capture-output '\n '-b 127.0.0.1:{port} '\n '-D -w {workers} --pid {pidfile} '\n '{wsgimodule}:application').format(\n **{'venv_dir': args.VENV_DIR,\n 'pidfile': args.GUNICORN_PIDFILE,\n 'wsgimodule': args.WSGI_MODULE,\n 'port': APP_PORT if not args.staging else STAGING_APP_PORT,\n 'workers': GUNICORN_WORKERS,\n 'error_logfile': 
args.GUNICORN_ERROR_LOGFILE,\n 'access_logfile': args.GUNICORN_ACCESS_LOGFILE}\n )\n\n\n@fabric.task\ndef start_webserver(c, mode='debug', tag='latest', staging=True, branch='master'):\n \"\"\"\n Starts the webserver that is running the Django instance\n \"\"\"\n populate_args(c, mode=mode, tag=tag, staging=staging, branch=branch)\n start_webserver_internal(c)\n\n\ndef start_webserver_internal(c):\n print('starting new webserver: \"{}\"'.format(_webserver_command(c)))\n with c.cd(c.config.bgtools.SRC_DIR):\n c.run(_webserver_command(c), pty=False, echo=True)\n\n\n@fabric.task(hosts=[HOST])\ndef restart_webserver(c, mode=None, tag=None, staging=None, branch=None):\n \"\"\"\n Restarts the webserver that is running the Django instance\n \"\"\"\n populate_args(c, mode=mode, staging=staging, tag=tag, branch=branch)\n restart_webserver_internal(c)\n\n\ndef restart_webserver_internal(c):\n args = c.config.bgtools\n if file_exists(c, args.GUNICORN_PIDFILE):\n print('killing existing webserver')\n c.run(\"kill -HUP $(cat {})\".format(args.GUNICORN_PIDFILE), echo=True)\n else:\n start_webserver_internal(c)\n\n\ndef populate_arg(args, existing, argname):\n return existing if existing is not None else args[argname]\n\n\ndef populate_args(c, **kwargs):\n\n args = c.config.bgtools\n\n # env.use_ssh_config = True\n for k, v in kwargs.items():\n print('setting {} to {}'.format(k, populate_arg(args, v, k)))\n setattr(args, k, populate_arg(args, v, k))\n\n project = DJANGO_PROJECT_NAME\n if args.staging:\n project += '_staging'\n args.DJANGO_APP_ROOT = posixpath.join(DJANGO_APPS_DIR, project)\n\n # Python version\n args.PYTHON_BIN = \"python3.5\"\n # env.PYTHON_PREFIX = \"\" # e.g. /usr/local Use \"\" for automatic\n # env.PYTHON_FULL_PATH = (posixpath.join(env.PYTHON_PREFIX, 'bin', env.PYTHON_BIN)\n # if env.PYTHON_PREFIX else env.PYTHON_BIN)\n\n args.GUNICORN_PIDFILE = posixpath.join(args.DJANGO_APP_ROOT, 'gunicorn.pid')\n args.GUNICORN_ERROR_LOGFILE = posixpath.join(LOGS_ROOT_DIR,\n 'gunicorn_error_{}.log'.format(project))\n args.GUNICORN_ACCESS_LOGFILE = posixpath.join(LOGS_ROOT_DIR,\n 'gunicorn_access_{}.log'.format(project))\n\n args.SRC_DIR = posixpath.join(args.DJANGO_APP_ROOT, DJANGO_PROJECT_NAME)\n args.VENV_DIR = posixpath.join(args.DJANGO_APP_ROOT, args.venv_subdir)\n args.CHECKOUT_DIR = posixpath.join(args.DJANGO_APP_ROOT, 'checkouts')\n\n args.WSGI_MODULE = '{}.wsgi'.format(DJANGO_PROJECT_NAME)\n\n args.LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\n@fabric.task(hosts=[HOST])\ndef deploy(c, mode=None, staging=True, tag=None, branch=None):\n populate_args(c, mode=mode, staging=staging, tag=tag, branch=branch)\n print(c.config.bgtools)\n copy_settings(c)\n rsync_source(c)\n install_dependencies(c)\n checkout_and_install_libs(c)\n collect_static(c)\n restart_webserver_internal(c)\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":10949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"78743826","text":"from config import *\r\nimport requests, json\r\nimport alpaca_trade_api as tradeapi\r\n\r\n# On paper account reset it also resets Keys\r\n\r\nBASE_URL = \"https://paper-api.alpaca.markets\"\r\n# TODO: Decide when we want to buy (ceiling and floor)\r\n# TODO: Pick an amount we want to play with (EX: $5000)\r\n# TODO: Type of order for our sale (rn its good till end of day)\r\n# TODO: Last Trade or Last Quote, need to decide how to set our buy limit price\r\n\r\n# Temp ceiling and floor for the 
sweet spot\r\nceiling = 400\r\nfloor = 300\r\n\r\n# Amount variable is the amount you are willing to spend\r\namount = 5000\r\n\r\n\r\nclass stock:\r\n def __init__(self):\r\n self.alpaca = tradeapi.REST(paper_id, paper_secret, BASE_URL)\r\n\r\n # Sends a buy\r\n def buy(self, symbol, qty, side, type, limit_price, time_in_force):\r\n self.alpaca.submit_order(\r\n symbol=symbol,\r\n qty=qty,\r\n side=side,\r\n type=type,\r\n limit_price=limit_price,\r\n time_in_force=time_in_force\r\n )\r\n\r\n # Sends a sell\r\n def sell(self, symbol, qty, side, type, time_in_force, limit_price, stop_loss):\r\n self.alpaca.submit_order(\r\n symbol=symbol,\r\n qty=qty,\r\n side=side,\r\n type=type,\r\n time_in_force=time_in_force,\r\n limit_price=limit_price,\r\n stop_loss=stop_loss\r\n )\r\n\r\n # Cancels the trade of a specific order\r\n def cancel(self, order_id):\r\n self.alpaca.cancel_order(order_id)\r\n\r\n # Cancels all outstanding trades\r\n def all_cancel(self):\r\n self.alpaca.cancel_all_orders()\r\n\r\n # Grabs the account buying power\r\n def account_balance(self):\r\n r = self.alpaca.get_account()\r\n print(r.buying_power)\r\n\r\n # Grabs the price of the last trade [returns float w/ 2 decimals]\r\n def last_trade(self, symbol):\r\n lt = self.alpaca.get_last_trade(symbol)\r\n print(lt)\r\n print(type(lt))\r\n return lt.price\r\n\r\n # Computes the var(qty) of shares to buy to get var(amount) of stock [returns int]\r\n def qty(self, amount, price):\r\n qty = round(amount / price)\r\n return qty\r\n\r\n def spot_check(self, symbol):\r\n while True:\r\n price = self.last_trade(symbol)\r\n qty = self.qty(amount, price)\r\n if price > floor and price < ceiling:\r\n r = self.buy(symbol, qty, \"buy\", \"limit\", price,\r\n \"fok\") # fok is fill or kill order is fully filled of cancelled\r\n try:\r\n self.position(symbol)\r\n self.sell(symbol, qty, \"sell\", \"limit\", \"gtc\", (price * 1.0002),\r\n (price * 0.9999)) # gtc is good till close\r\n break\r\n except requests.exceptions.HTTPError:\r\n print(\"No Postions in\" + symbol)\r\n\r\n def position(self, symbol):\r\n print(self.alpaca.get_position(symbol))\r\n\r\n def bars(self, symbol, time, limit, end):\r\n self.alpaca.get_barset(symbol, time, limit=limit, end=end)\r\n\r\n#investment = stock()\r\n#investment.account_balance()\r\n#lt = investment.last_trade(\"WKHS\")\r\n#investment.absolute_value(lt)\r\n# investment.buy(\"WKHS\", 100, \"buy\", price, \"limit\", \"fok\") #(symbol, qty, side, type, limit_price, time_in_force)\r\n# investment.position(\"RCL\")\r\n#print(\"hello world\")\r\n\r\n\r\n","sub_path":"stockObject.py","file_name":"stockObject.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"358531543","text":"import sys\nimport os\nimport contextlib\nimport subprocess\n\nimport pytest\n\n\n@contextlib.contextmanager\ndef set_key(dictionary, key, value):\n key_is_set = key in dictionary\n original_value = dictionary.pop(key, None)\n\n dictionary[key] = value\n\n try:\n yield\n finally:\n if key_is_set:\n dictionary[key] = original_value\n else:\n del dictionary[key]\n\n\n@contextlib.contextmanager\ndef insert_value(list, index, value):\n list.insert(index, value)\n try:\n yield\n finally:\n if value in list:\n list.pop(list.index(value))\n\n\n@pytest.yield_fixture(autouse=True, scope='session')\ndef test_project():\n project_dir = os.path.join(os.path.dirname(__file__), 'testprj')\n with insert_value(sys.path, 0, project_dir):\n with 
set_key(os.environ, 'DJANGO_SETTINGS_MODULE', 'testprj.settings'):\n from django.conf import settings\n assert 'testapp' in settings.INSTALLED_APPS\n\n import django\n if hasattr(django, 'setup'):\n django.setup()\n\n yield\n\n\n@pytest.fixture(scope='session')\ndef manage():\n def call(*args):\n cmd = [\n sys.executable,\n os.path.join(os.path.dirname(__file__), 'testprj', 'manage.py'),\n ] + list(args)\n return subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n\n return call\n","sub_path":"djclick/test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"596714511","text":"\nimport cv2\nimport numpy as np\n\nimg = cv2.imread('Screenshot from 2020-04-28 20-20-05.png')\nimGrey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"grey\", imGrey)\n_,thresh = cv2.threshold(imGrey,240,255,cv2.THRESH_BINARY)\n\ncv2.imshow(\"thers\", thresh)\ncontours,_ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n\n\nfor contour in contours:\n approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)\n cv2.drawContours(img, [approx],0,(0,0,0),5)\n x = approx.ravel()[0]\n y = approx.ravel()[1]\n if len(approx) == 3:\n cv2.putText(img,\"Triangle\",(x,y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,0,0))\n elif len(approx) == 4:\n x,y,w,h = cv2.boundingRect(approx)\n aspectRatio = float(w)/h\n print(aspectRatio)\n if aspectRatio >= 0.95 and aspectRatio<=1.05:\n cv2.putText(img, \"Square\", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))\n else:\n cv2.putText(img,\"Rectangle\",(x,y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,0,0))\n elif len(approx) == 5:\n cv2.putText(img,\"Pentagon\",(x,y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,0,0))\n elif len(approx) == 10:\n cv2.putText(img,\"Star\",(x,y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,0,0))\n else:\n cv2.putText(img, \"Circle\", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))\n\n\n\n\ncv2.imshow(\"shapes\",img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","sub_path":"OpenCV/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"466009060","text":"\"\"\" Datasets wrapped in conviniet models \"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\nimport csv\n\nfrom six import next\nimport pandas as pd\n\nfrom oddt import toolkit\n\n\nclass pdbbind(object):\n def __init__(self,\n home,\n version=None,\n default_set=None,\n data_file=None,\n opt=None):\n version = int(version)\n self.home = home\n if default_set:\n self.default_set = default_set\n else:\n if version == 2007:\n self.default_set = 'general'\n else:\n self.default_set = 'general_PL'\n self.opt = opt or {}\n self.sets = {}\n self._set_ids = {}\n self._set_act = {}\n\n if version:\n if version == 2007:\n pdbind_sets = ['core', 'refined', 'general']\n else:\n pdbind_sets = ['core', 'refined', 'general_PL']\n for pdbind_set in pdbind_sets:\n if data_file:\n csv_file = data_file\n elif version == 2007:\n csv_file = os.path.join(self.home,\n 'INDEX.%i.%s.data' % (version, pdbind_set))\n elif version == 2016:\n csv_file = os.path.join(self.home,\n 'index',\n 'INDEX_%s_data.%i' % (pdbind_set, version))\n else:\n csv_file = os.path.join(self.home,\n 'INDEX_%s_data.%i' % (pdbind_set, version))\n\n if os.path.isfile(csv_file):\n data = pd.read_csv(csv_file,\n sep='\\s+',\n usecols=[0, 1, 2, 3],\n names=['pdbid',\n 'resolution',\n 'release_year',\n 
'act'],\n comment='#')\n self._set_ids[pdbind_set] = data['pdbid'].tolist()\n self._set_act[pdbind_set] = data['act'].tolist()\n self.sets[pdbind_set] = dict(zip(self._set_ids[pdbind_set],\n self._set_act[pdbind_set]))\n if len(self.sets) == 0:\n raise Exception('There is no PDBbind set availabe')\n else:\n pass # list directory, but no metadata then\n\n @property\n def ids(self):\n # return sorted(self.sets[self.default_set].keys())\n return self._set_ids[self.default_set]\n\n @property\n def activities(self):\n return self._set_act[self.default_set]\n\n def __iter__(self):\n for pdbid in self.ids:\n yield _pdbbind_id(self.home, pdbid, opt=self.opt)\n\n def __getitem__(self, pdbid):\n if pdbid in self.ids:\n return _pdbbind_id(self.home, pdbid, opt=self.opt)\n else:\n if type(pdbid) is int:\n return _pdbbind_id(self.home + '', self.ids[pdbid], opt=self.opt)\n return None\n\n\nclass _pdbbind_id(object):\n def __init__(self, home, pdbid, opt=None):\n self.home = home\n self.id = pdbid\n self.opt = opt or {}\n\n @property\n def protein(self):\n f = os.path.join(self.home, self.id, '%s_protein.pdb' % self.id)\n if os.path.isfile(f):\n return next(toolkit.readfile('pdb', f, lazy=True, opt=self.opt))\n else:\n return None\n\n @property\n def pocket(self):\n f = os.path.join(self.home, self.id, '%s_pocket.pdb' % self.id)\n if os.path.isfile(f):\n return next(toolkit.readfile('pdb', f, lazy=True, opt=self.opt))\n else:\n return None\n\n @property\n def ligand(self):\n f = os.path.join(self.home, self.id, '%s_ligand.sdf' % self.id)\n if os.path.isfile(f):\n return next(toolkit.readfile('sdf', f, lazy=True, opt=self.opt))\n else:\n return None\n\n\nclass dude(object):\n\n def __init__(self, home):\n \"\"\"A wrapper for DUD-E (A Database of Useful Decoys: Enhanced)\n http://dude.docking.org/\n\n Parameters\n ----------\n home : str\n Path to files from dud-e\n\n \"\"\"\n self.home = home\n if not os.path.isdir(self.home):\n raise Exception('Directory %s doesn\\'t exist' % self.home)\n\n self.ids = []\n files = ['receptor.pdb', 'crystal_ligand.mol2', 'actives_final.mol2.gz', 'decoys_final.mol2.gz']\n # ids sorted by size of protein\n all_ids = ['fnta', 'dpp4', 'mmp13', 'hivpr', 'ada17', 'mk14', 'egfr', 'src', 'drd3', 'aa2ar',\n 'cah2', 'parp1', 'cdk2', 'lck', 'pde5a', 'thrb', 'aces', 'try1', 'pparg', 'vgfr2',\n 'pgh2', 'esr1', 'fa10', 'esr2', 'ppara', 'dhi1', 'hivrt', 'bace1', 'ace', 'dyr',\n 'akt1', 'adrb1', 'prgr', 'gcr', 'adrb2', 'andr', 'ppard', 'csf1r', 'gria2', 'cp3a4',\n 'met', 'pgh1', 'abl1', 'casp3', 'kit', 'hdac8', 'hdac2', 'braf', 'urok', 'lkha4',\n 'igf1r', 'aldr', 'fpps', 'hmdh', 'kpcb', 'tgfr1', 'ital', 'mp2k1', 'nos1', 'tryb1',\n 'rxra', 'thb', 'cp2c9', 'ptn1', 'reni', 'pnph', 'tysy', 'akt2', 'kif11', 'aofb',\n 'plk1', 'hivint', 'mk10', 'pyrd', 'grik1', 'jak2', 'rock1', 'fa7', 'mapk2', 'nram',\n 'wee1', 'fkb1a', 'def', 'ada', 'fak1', 'mcr', 'pa2ga', 'xiap', 'hs90a', 'hxk4',\n 'mk01', 'pygm', 'glcm', 'comt', 'sahh', 'cxcr4', 'kith', 'ampc', 'pur2', 'fabp4',\n 'inha', 'fgfr1']\n for i in all_ids:\n if os.path.isdir(os.path.join(self.home, i)):\n self.ids.append(i)\n for file in files:\n f = os.path.join(self.home, i, file)\n if not os.path.isfile(f) and not (file[-3:] == '.gz' and os.path.isfile(f[:-3])):\n print('Target %s doesn\\'t have file %s' % (i, file), file=sys.stderr)\n if not self.ids:\n print('No targets in directory %s' % (self.home), file=sys.stderr)\n\n def __iter__(self):\n for dude_id in self.ids:\n yield _dude_target(self.home, dude_id)\n\n def __getitem__(self, 
dude_id):\n if dude_id in self.ids:\n return _dude_target(self.home, dude_id)\n else:\n raise Exception('Directory %s doesn\\'t exist' % self.home)\n\n\nclass _dude_target(object):\n\n def __init__(self, home, dude_id):\n \"\"\"Allows to read files of the dude target\n\n Parameters\n ----------\n home : str\n Directory to files from dud-e\n\n dude_id : str\n Target id\n \"\"\"\n self.home = home\n self.dude_id = dude_id\n\n @property\n def protein(self):\n \"\"\"Read a protein file\"\"\"\n f = os.path.join(self.home, self.dude_id, 'receptor.pdb')\n if os.path.isfile(f):\n return next(toolkit.readfile('pdb', f))\n else:\n return None\n\n @property\n def ligand(self):\n \"\"\"Read a ligand file\"\"\"\n f = os.path.join(self.home, self.dude_id, 'crystal_ligand.mol2')\n if os.path.isfile(f):\n return next(toolkit.readfile('mol2', f))\n else:\n return None\n\n @property\n def actives(self):\n \"\"\"Read an actives file\"\"\"\n f = os.path.join(self.home, self.dude_id, 'actives_final.mol2.gz')\n if os.path.isfile(f):\n return toolkit.readfile('mol2', f)\n # check if file is unpacked\n elif os.path.isfile(f[:-3]):\n return toolkit.readfile('mol2', f[:-3])\n else:\n return None\n\n @property\n def decoys(self):\n \"\"\"Read a decoys file\"\"\"\n f = os.path.join(self.home, self.dude_id, 'decoys_final.mol2.gz')\n if os.path.isfile(f):\n return toolkit.readfile('mol2', f)\n # check if file is unpacked\n elif os.path.isfile(f[:-3]):\n return toolkit.readfile('mol2', f[:-3])\n else:\n return None\n","sub_path":"oddt/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":8245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"616862329","text":"import sys\nimport socket\nimport datetime\nimport random\nfrom _thread import *\nimport math\n\nserverName = 'localhost'\nserverPort = 13000\ntry:\n serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nexcept socket.error as msg:\n print(\"Gabimi gjate krijimit te soketes: \" + str(msg))\n\ntry:\n print(\"Serveri eshte startuar ne \" + str(serverName) + \" ne portin: \" + str(serverPort))\n print(\"Lidhja me portin \" + str(serverPort) + \"!\")\n serverSocket.bind((serverName, serverPort))\n serverSocket.listen(5)\n print(\"Serveri eshte i gatshem te pranoj kerkesa!\" + \"\\n\")\nexcept socket.error as msg:\n print(\"Gabimi gjate startimit te serverit: \" + str(msg))\n\ndef IPADDRESS():\n return \"IP adresa e klientit eshte: \" + str(address[0])\n\ndef PORT(porti):\n return \"Klienti eshte duke perdorur portin \" + porti\n \ndef COUNT(stringu):\n numriZanore = 0\n numriBashketingellore = 0\n zanore = [ 'a', 'e', 'i', 'o', 'u', 'y', 'A', 'E', 'I', 'O', 'U', 'Y' ]; \n for i in stringu:\n if i.isalpha():\n if i not in zanore:\n numriBashketingellore += 1\n elif i in zanore:\n numriZanore += 1\n return str(\"Teksti i pranuar permban \" + str(numriZanore) + \" zanore dhe \" + str(numriBashketingellore) + \" bashketingellore!\")\n \ndef ROLLTHEDICE():\n min = 1\n max = 6\n \n roll = \"yes\"\n while roll == \"yes\" or roll == \"y\":\n return random.randint(min, max)\n\n\ndef REVERSE(fjala):\n fjala = str(fjala)\n fjalar = fjala[::-1]\n return str(fjalar)\n\n\ndef TIME():\n koha = datetime.datetime.now()\n return koha.strftime(\"%H:%M:%S %p\")\n\ndef PALINDROME(fjala):\n fjala = str(fjala)\n fjalar = fjala[::-1]\n if fjala == fjalar:\n return str(\"Fjala e shenuar eshte PALINDROME!\")\n else:\n return str(\"Fjala e shenuar nuk eshte PALINDROME!\")\n\ndef GAME(): \n randomNumbers = [] 
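# build a sorted list of five random numbers between 1 and 35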
\n for iterator in range(5): \n randomNumbers.append(random.randint(1, 35)) \n randomNumbers.sort()\n return randomNumbers\n\ndef GCF(nr1, nr2):\n nr1 = int(nr1)\n nr2 = int(nr2)\n if(nr2==0): \n return nr1 \n else: \n return GCF(nr2,nr1%nr2)\n\ndef CONVERT(opsioni, numri):\n numri = float(numri)\n if opsioni == \"CMTOFEET\":\n return numri / 30.48\n elif opsioni == \"FEETOCM\":\n return numri * 30.48\n elif opsioni == \"KMTOMILES\":\n return numri / 1.609\n elif opsioni == \"MILESTOKM\":\n return numri * 1.609 \n\ndef PRIME(num):\n if num > 1:\n # KONTROLLO PER FAKTORE\n for i in range(2,num):\n if (num % i) == 0:\n return str(str(num) + str(\"nuk eshte numer prim\"))\n return str(str(i) + str(\"times\") + (num//i) + str(\"is\") + str(num))\n break\n else:\n return str(str(num) + str(\" eshte numer prim\"))\n \n # NESE NUMRI INPUTIT ESHTE ME I VOGEL\n # OSE I BARABARTE ME 1 , NUK ESHTE PRIM\n else:\n return str(str(num) + str(\" nuk eshte numer prim\"))\n\ndef clientThread(connection, address):\n while True:\n try:\n client_response = str(connection.recv(1024), \"utf-8\")\n print(\"Kerkesa e klientit \" + str(address[0]) + \" eshte: \" + str(client_response))\n client_response = client_response.split(\" \")\n sendResponse = str(\"\")\n if client_response[0] == \"IPADDRESS\":\n sendResponse = str(IPADDRESS())\n elif client_response[0] == \"PORT\":\n sendResponse = str(PORT(str(address[1])))\n elif client_response[0] == \"COUNT\": \n stringuPerCount=\"\"\n stringuPerCount = str.join(\" \", client_response[1:])\n sendResponse = str(COUNT(stringuPerCount))\n elif client_response[0] == \"ROLLTHEDICE\":\n sendResponse = str(ROLLTHEDICE())\n elif client_response[0] == \"REVERSE\":\n sendResponse = str(REVERSE(client_response[1]))\n elif client_response[0] == \"TIME\":\n sendResponse = str(TIME())\n elif client_response[0] == \"PALINDROME\": \n sendResponse = str(PALINDROME(client_response[1]))\n elif client_response[0] == \"GAME\":\n sendResponse = str(GAME())\n elif client_response[0] == \"GCF\":\n sendResponse = str(GCF(client_response[1], client_response[2]))\n elif client_response[0] == \"CONVERT\":\n sendResponse = str(CONVERT(client_response[1], client_response[2]))\n elif client_response[0] == \"PRIME\":\n sendResponse = str(PRIME(int(client_response[1])))\n elif client_response[0] == \"QUIT\":\n print(\"Klienti \" + str(address[0]) + \" nuk eshte ne linje!\")\n break\n else:\n sendResponse = str(\"Kerkesa nuk egziston! Provoni perseri...\")\n except IndexError:\n sendResponse = str(\"Nuk keni dhene argumentet e duhura! Provoni perseri...\")\n \n connection.send(str.encode(sendResponse))\n connection.close()\n\nwhile 1:\n connection, address = serverSocket.accept()\n print(\"\\nKerkesa per konektim u pranua! 
Ne linje eshte: | IP: \" + str(address[0]) + \" | Port: \" + str(address[1]))\n start_new_thread(clientThread, (connection, address))\n","sub_path":"SERVERTCP.py","file_name":"SERVERTCP.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"23352581","text":"from django.core.management.base import BaseCommand\n\nfrom targets.models import Timeseries\nfrom targets.profiling import MANIFESTS\n\n\nclass Command(BaseCommand):\n help = \"Displays the manifest versions currently installed and available to use\"\n\n def log(self, verbosity):\n if verbosity > 0:\n return lambda text: self.stdout.write(text)\n return lambda _: None\n\n def handle(self, *args, **kwargs):\n verbosity = kwargs.get(\"verbosity\", 1)\n log = self.log(verbosity)\n log(\"AVAILABLE:\")\n for version in sorted(map(lambda m: m[\"version\"], MANIFESTS.values())):\n log(version)\n log(\"\")\n log(\"INSTALLED:\")\n for version in (\n Timeseries.objects\n .values_list(\"script_version\", flat=True)\n .order_by(\"script_version\")\n .distinct(\"script_version\")\n ):\n log(version)\n","sub_path":"tranque_v1.8.4_source/backend/src/targets/management/commands/showmanifests.py","file_name":"showmanifests.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"83895149","text":"from flask import Blueprint, render_template, abort, redirect, url_for\n\nfrom app.lib.forms import EffectForm, EffectGroupForm, EffectStackForm\nfrom app import database as db\nfrom app.lib.database import ObjProxy\n\n\napp = Blueprint('editor', __name__)\n\n\n@app.route('/')\ndef index():\n return render_template('editor/index.jinja.html', db=db)\n\n\n@app.route('/edit//new', methods=['GET', 'POST'])\n@app.route('/edit//', methods=['GET', 'POST'])\ndef edit(type_, name=None):\n obj = None\n key = None\n form_cls = None\n if type_ == 'effect':\n key = 'effects'\n form_cls = EffectForm\n\n elif type_ == 'group':\n key = 'effect_groups'\n form_cls = EffectGroupForm\n\n elif type_ == 'stack':\n key = 'effect_stacks'\n form_cls = EffectStackForm\n\n else:\n abort(400, \"Invalid type\")\n\n if name:\n obj = db[key].get(name)\n if obj:\n obj = ObjProxy(obj)\n form = form_cls(obj=obj)\n\n if name and obj is None:\n abort(404, \"No such \" + type_)\n\n if form.validate_on_submit():\n with db:\n obj = obj or ObjProxy({})\n form.populate_obj(obj)\n db[key][obj.name] = obj.data\n db.save()\n\n return redirect(url_for('.index'))\n\n return render_template('editor/form.jinja.html', form=form)\n","sub_path":"webgui/app/views/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"386446653","text":"# -*- coding: utf-8 -*-\r\ndef FR1(DA,t_trd1,t_trd2,num):\r\n #输入:\r\n #DA--2017年股票交易数据\r\n #t_trd1--聚类数据区间开始日期\r\n #t_trd2--聚类数据区间结束日期\r\n #num--基于总体规模与投资效率指标的综合评价方法提取样本个数\r\n #输出:\r\n #Data--形态特征数据\r\n #KeyData--关键价格点数据\r\n #KeyData_index--关键价格点对应序号\r\n import pandas as pd \r\n import fun\r\n import df\r\n import numpy as np\r\n dta=pd.read_excel('ddata.xlsx')\r\n r=fun.Fr(dta,'2016')\r\n c=r[0]\r\n code=list(c.index[0:num])\r\n p=-1\r\n td=pd.read_excel('交易日历数据表.xlsx')\r\n I1=td['Clddt'].values>=t_trd1\r\n I2=td['Clddt'].values<=t_trd2\r\n I=I1&I2\r\n ddt=td.loc[I,['Clddt']]\r\n M=len(ddt)\r\n num=10 #10\r\n Data=np.zeros((len(code),num))\r\n 
KeyData=np.zeros((len(code),num+1))\r\n KeyData_index=np.zeros((len(code),num+1))\r\n for t in range(len(code)):\r\n data=DA.loc[DA.iloc[:,0].values==code[t],['Trddt','Clsprc']]\r\n I1=data['Trddt'].values>=t_trd1\r\n I2=data['Trddt'].values<=t_trd2\r\n I=I1&I2\r\n dt=data.loc[I,['Clsprc']]['Clsprc']\r\n if len(dt)==M:\r\n p=p+1\r\n dt=pd.Series(dt.values,index=range(len(dt)))\r\n keydata=df.get_keydata(dt,num)\r\n T=df.get_tz(keydata)\r\n y=keydata\r\n KeyData[p,0]=code[t]\r\n Data[p,0]=code[t]\r\n Data[p,1:]=T\r\n KeyData_index[p,0]=code[t]\r\n KeyData[p,1:]=(y.values-min(y.values))/(max(y.values)-min(y.values))\r\n KeyData_index[p,1:]=y.index\r\n Data=Data[0:p,:]\r\n KeyData=KeyData[0:p,:]\r\n KeyData_index=KeyData_index[0:p,:]\r\n return (Data,KeyData,KeyData_index)\r\n\r\ndef FR2(DA,Data,KeyData,KeyData_index,s_trd1,s_trd2,class_num):\r\n #输入:\r\n #DA--2017年股票交易数据\r\n #Data--形态特征数据\r\n #KeyData--关键价格点数据\r\n #KeyData_index--关键价格点对应序号\r\n #s_trd1--收益率计算持有期开始日期\r\n #s_trd2--收益率计算持有期结束日期\r\n #class_num--聚类个数\r\n #输出:\r\n #Data_c--形态特征数据+聚类结果列\r\n #KeyData_c--关键价格点数据+聚类结果列\r\n #KeyData_index_c--关键价格点对应序号+聚类结果列\r\n #D--每只股票代码、所属聚类类别、收益率组成的数据框\r\n #list_cr--每类股票的总收益\r\n import pandas as pd\r\n #from sklearn.cluster import KMeans\r\n import numpy as np\r\n import kmean\r\n #model = KMeans(n_clusters = class_num, random_state=0, max_iter = 10000) \r\n #model.fit(Data[:,1:]) \r\n #c=model.labels_ \r\n c=kmean.K_mean(Data[:,1:],class_num)\r\n p=len(Data)\r\n KeyData_c=np.hstack((KeyData,c.reshape(p,1)))\r\n KeyData_index_c=np.hstack((KeyData_index,c.reshape(p,1)))\r\n Data_c=np.hstack((Data,c.reshape(p,1)))\r\n list_code=[]\r\n list_codec=[]\r\n list_r=[]\r\n list_cr=[]\r\n for t in range(class_num):\r\n code_t=KeyData_c[KeyData_c[:,len(KeyData_c[0,:])-1]==t,0]\r\n r_t=0\r\n count_t=0\r\n for i in range(len(code_t)):\r\n I1=DA['Trddt'].values>=s_trd1\r\n I2=DA['Trddt'].values<=s_trd2\r\n I3=DA['Stkcd'].values==code_t[i]\r\n I=I1&I2&I3\r\n dta=DA.iloc[I,[2]]['Clsprc'].values\r\n if len(dta)>1:\r\n r=(dta[len(dta)-1]-dta[0])/dta[0]\r\n list_code.append(code_t[i])\r\n list_codec.append(t)\r\n list_r.append(r)\r\n r_t=r_t+r\r\n count_t=count_t+1\r\n list_cr.append(r_t/count_t)\r\n D={'code':list_code,'codec':list_codec,'coder':list_r}\r\n D=pd.DataFrame(D)\r\n return (Data_c,KeyData_c,KeyData_index_c,D,list_cr)\r\n\r\n####训练样本x,y的构建\r\nimport pandas as pd \r\nDA=pd.read_excel('DA.xlsx') \r\nR1=FR1(DA,'2017-05-01','2017-07-31',400)\r\nR2=FR2(DA,R1[0],R1[1],R1[2],'2017-08-01','2017-08-31',20)\r\ndt1=R2[0]\r\ncr1=pd.Series(R2[4])\r\n\r\ncrr=cr1.sort_values(ascending=False)\r\ncr=list(crr.index)\r\nfor i in range(len(crr)):\r\n if i<5:\r\n dt1[dt1[:,len(dt1[0,:])-1]==cr[i],len(dt1[0,:])-1]=1\r\ndt1[dt1[:,len(dt1[0,:])-1]!=1,len(dt1[0,:])-1]=-1\r\n\r\nR1=FR1(DA,'2017-06-01','2017-08-31',400)\r\nR2=FR2(DA,R1[0],R1[1],R1[2],'2017-09-01','2017-09-30',20)\r\ndt2=R2[0]\r\ncr2=pd.Series(R2[4])\r\n\r\ncrr=cr2.sort_values(ascending=False)\r\ncr=list(crr.index)\r\nfor i in range(len(crr)):\r\n if i<5:\r\n dt2[dt2[:,len(dt2[0,:])-1]==cr[i],len(dt2[0,:])-1]=1\r\ndt2[dt2[:,len(dt2[0,:])-1]!=1,len(dt2[0,:])-1]=-1\r\n\r\nimport numpy as np \r\ndt=np.vstack((dt2,dt1)) \r\nx=dt[:,1:-1] \r\ny=dt[:,len(dt[0,:])-1] \r\n\r\n####测试样本x1的构建\r\nR1=FR1(DA,'2017-07-01','2017-09-30',400)\r\ndt3=R1[0]\r\nx1=dt3[:,1:]\r\n\r\n####基于支持向量机预测模型的量化投资策略设计\r\nfrom sklearn import svm\r\nclf = svm.SVC()\r\nclf.fit(x, y) \r\nresult=clf.predict(x1) \r\ncode=dt3[result==1,0]\r\nlist_r=[]\r\nfor i in range(len(code)):\r\n 
I1=DA['Trddt'].values>='2017-10-01'\r\n I2=DA['Trddt'].values<='2017-10-31'\r\n I3=DA['Stkcd'].values==code[i]\r\n I=I1&I2&I3\r\n dta=DA.iloc[I,[2]]['Clsprc'].values\r\n if len(dta)>1:\r\n r=(dta[len(dta)-1]-dta[0])/dta[0]\r\n list_r.append(r)\r\ntotal_r=sum(list_r)\r\n\r\n####沪深300指数同期收益率的计算 \r\nindx300=pd.read_excel('index300.xlsx') \r\nI1=indx300['Idxtrd01'].values>='2017-10-01'\r\nI2=indx300['Idxtrd01'].values<='2017-10-31'\r\nIn=I1&I2\r\nda=indx300.loc[In,'Idxtrd05'].values\r\nindex300_r=(da[len(da)-1]-da[0])/da[0]\r\n\r\n","sub_path":"程序与数据/第10章 综合案例3:股票价格形态聚类与收益分析/10.6.2~10.6.3.py","file_name":"10.6.2~10.6.3.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"221054574","text":"import designpy.advice as a\r\nimport designpy.lib as lib\r\nimport designpy.statemachine as sm\r\n\r\nADVICEMESSAGE = \"consistent indentation\"\r\n\r\n_indentationLevel = 0\r\n_indentationStep = \"\"\r\n\r\ndef advice():\r\n\tsource = lib.removeComments(lib.source(_fileName))\r\n\tsetIndentationStep(getIndentationStep(source))\r\n\r\n\tdefaultState = sm.State(\"default\", lambda line : \"inconsistent indentation at: {}\".format(line.strip()))\r\n\tmustMatchIndentation = sm.State(u\"must match indentation\", lambda line : chr(192) + \"inconsistent indentation at: {}\".format(line.strip()))\r\n\tbackslash = sm.State(\"backslash\", lambda line : \"unexpected error occured at: {}\".format(line.strip()))\r\n\r\n\tdefaultState.addTransition(\\\r\n\t\tmustMatchIndentation,\\\r\n\t\tlambda line : endsWithDoubleDot(line) and matchesIndentationLevelOrLess(line),\\\r\n\t\taction = lambda line : setIndentationLevel(line) and incrementIndentationLevel())\r\n\tdefaultState.addTransition(\\\r\n\t\tbackslash,\\\r\n\t\tlambda line : endsWithBackslash(line) and matchesIndentationLevelOrLess(line))\r\n\tdefaultState.addTransition(\\\r\n\t\tdefaultState,\\\r\n\t\tmatchesIndentationLevelOrLess,\\\r\n\t\taction = setIndentationLevel)\r\n\r\n\tmustMatchIndentation.addTransition(\\\r\n\t\tmustMatchIndentation,\\\r\n\t\tlambda line : endsWithDoubleDot(line) and matchesIndentationLevel(line),\\\r\n\t\taction = lambda line : incrementIndentationLevel())\r\n\tmustMatchIndentation.addTransition(\\\r\n\t\tbackslash,\\\r\n\t\tlambda line : endsWithBackslash(line) and matchesIndentationLevel(line))\r\n\tmustMatchIndentation.addTransition(\\\r\n\t\tdefaultState,\\\r\n\t\tlambda line : matchesIndentationLevel(line),\\\r\n\t\taction = setIndentationLevel)\r\n\r\n\tbackslash.addTransition(\\\r\n\t\tmustMatchIndentation,\\\r\n\t\tendsWithDoubleDot,\\\r\n\t\taction = lambda line : incrementIndentationLevel())\r\n\tbackslash.addTransition(\\\r\n\t\tbackslash,\\\r\n\t\tendsWithBackslash)\r\n\tbackslash.addTransition(\\\r\n\t\tdefaultState,\\\r\n\t\tlambda line : True)\r\n\r\n\tsuccess, message = sm.StateMachine(defaultState).run((line for line in source.split(\"\\n\") if len(line.strip()) != 0))\r\n\tif success:\r\n\t\treturn a.Advice(a.AdviceLevel.GOOD, ADVICEMESSAGE)\r\n\treturn a.Advice(a.AdviceLevel.BAD, ADVICEMESSAGE, message)\r\n\r\ndef getIndentationStep(source):\r\n\tnextLineIndented = False\r\n\tfor line in source.split(\"\\n\"):\r\n\t\tif nextLineIndented:\r\n\t\t\tindentationStep = \"\"\r\n\t\t\tfor char in line:\r\n\t\t\t\tif char == \" \" or char == \"\\t\":\r\n\t\t\t\t\tindentationStep += char\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn indentationStep\r\n\r\n\t\tif line.strip().endswith(\":\"):\r\n\t\t\tnextLineIndented = True\r\n\treturn 
\"\"\r\n\r\ndef endsWithBackslash(line):\r\n\treturn line.strip().endswith(\"\\\\\")\r\n\r\ndef endsWithDoubleDot(line):\r\n\treturn line.strip().endswith(\":\")\r\n\r\ndef matchesIndentationLevel(line):\r\n\treturn getIndentationLevel(line) == _indentationLevel\r\n\r\ndef matchesIndentationLevelOrLess(line):\r\n\treturn getIndentationLevel(line) <= _indentationLevel\r\n\r\ndef ifMatchesThenSetIndentationLevel(line):\r\n\tif matchesIndentationLevel(line):\r\n\t\treturn setIndentationLevel(line)\r\n\treturn False\r\n\r\ndef setIndentationStep(indentationStep):\r\n\tglobal _indentationStep\r\n\t_indentationStep = indentationStep\r\n\treturn True\r\n\r\ndef setIndentationLevel(line):\r\n\tglobal _indentationLevel\r\n\t_indentationLevel = getIndentationLevel(line)\r\n\treturn True\r\n\r\ndef incrementIndentationLevel():\r\n\tglobal _indentationLevel\r\n\t_indentationLevel += 1\r\n\treturn True\r\n\r\ndef getIndentationLevel(line):\r\n\tif _indentationStep == \"\":\r\n\t\treturn 0\r\n\r\n\tindentationLevel = 0\r\n\twhile line.startswith(_indentationStep):\r\n\t\tindentationLevel += 1\r\n\t\tline = line[len(_indentationStep):]\r\n\treturn indentationLevel","sub_path":"design/advisors/indentationLevel.py","file_name":"indentationLevel.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"619429364","text":"import math\nimport json\n\nEARTH_REDIUS = 6378.137\n\ndef rad(d):\n return d * math.pi / 180.0\n\ndef getDistance(lat1, lng1, lat2, lng2):\n radLat1 = rad(lat1)\n radLat2 = rad(lat2)\n a = radLat1 - radLat2\n b = rad(lng1) - rad(lng2)\n s = 2 * math.asin(math.sqrt(math.pow(math.sin(a/2), 2)\n + math.cos(radLat1) * math.cos(radLat2) * math.pow(math.sin(b/2), 2)))\n s = s * EARTH_REDIUS\n return s\n\n\n# 利用闭包求平均值\ndef make_averager():\n count = 0\n total = 0\n def averager(new_value=None):\n nonlocal count, total\n if new_value != None:\n count += 1\n total += new_value\n return total / count\n return averager\n\n\ndef main():\n # 提取经纬度信息\n with open('nodes.json', 'r') as f:\n data = json.load(f)\n \n # 储存两点的id和之间距离\n res = []\n max_dis = 0\n min_dis = 1\n avg = make_averager()\n\n # 读取厦门路段并储存各路段距离\n with open('xiamen_road.txt', 'r') as f:\n for line in f:\n if line[0] == 'e':\n _, e1, e2 = line.split()\n # 可能出现重复的数据\n lon1, lat1 = data[e1]\n lon2, lat2 = data[e2]\n dis = getDistance(float(lat1), float(lon1), float(lat2), float(lon2))\n # TEST:出现两个在同一位置的不同点\n if dis == 0:\n print(e1, e2)\n # 保存最大、最小、平均值\n max_dis = max(max_dis, dis)\n min_dis = min(min_dis, dis)\n avg(dis)\n # 保存id和之间距离\n res.append((e1, e2, dis))\n\n # 写入文件\n with open('distance.txt', 'w', encoding='utf-8') as f:\n print('平均值:{}'.format(avg() * 1000))\n print('最大值:{}'.format(max_dis * 1000))\n print('最小值:{}'.format(min_dis * 1000))\n # f.write('平均值:{}'.format(avg() * 1000) + '\\n')\n # f.write('最大值:{}'.format(max_dis * 1000) + '\\n')\n # f.write('最小值:{}'.format(min_dis * 1000) + '\\n')\n # f.write('ID1 ID2 DISTANCE(m)\\n')\n for r in res:\n e1, e2, dis = r\n f.write('{}'.format(dis * 1000) + '\\n')\n # f.write('{} {} {}'.format(e1, e2, dis) + '\\n')\n\n\nif __name__ == '__main__':\n main()\n print('Distance statistic done!')\n","sub_path":"Task3/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"4162707","text":"# Copyright (C) 2017 Tiancheng Zhao, Carnegie Mellon University\n\nimport numpy as np\n\n\n# 
Data feed\nclass LongDataLoader(object):\n \"\"\"A special efficient data loader for TBPTT\"\"\"\n batch_size = 0\n ptr = 0\n num_batch = None\n batch_indexes = None\n indexes = None\n data_size = None\n name = None\n\n def _shuffle_indexes(self):\n np.random.shuffle(self.indexes)\n\n def _prepare_batch(self, batch_idx):\n raise NotImplementedError(\"Have to override prepare batch\")\n\n def epoch_init(self, batch_size, shuffle=True):\n self.ptr = 0\n\n if shuffle:\n self._shuffle_indexes()\n\n # create batch indexes\n self.batch_size = batch_size\n temp_num_batch = self.data_size // batch_size\n self.batch_indexes = []\n for i in range(temp_num_batch):\n self.batch_indexes.append(self.indexes[i * self.batch_size:(i + 1) * self.batch_size])\n\n left_over = self.data_size-temp_num_batch*batch_size\n\n self.num_batch = len(self.batch_indexes)\n\n print(\"%s begins with %d batches with %d left over samples\" % (self.name, self.num_batch, left_over))\n\n def next_batch(self):\n if self.ptr < self.num_batch:\n cur_batch = self._prepare_batch(self.ptr)\n self.ptr += 1\n return cur_batch\n else:\n return None\n\n\nclass DataLoader(LongDataLoader):\n def __init__(self, name, data, config):\n self.name = name\n self.data = data\n self.data_size = len(data)\n self.max_utt_size = config.max_utt_len\n self.indexes = np.arange(0, self.data_size)\n\n def pad_to(self, tokens, do_pad=True):\n if len(tokens) >= self.max_utt_size:\n return tokens[0:self.max_utt_size-1] + [tokens[-1]]\n elif do_pad:\n return tokens + [0] * (self.max_utt_size-len(tokens))\n else:\n return tokens\n\n def _prepare_batch(self, batch_idx):\n batch_ids = self.batch_indexes[batch_idx]\n rows = [self.data[idx] for idx in batch_ids] #[(context, response), ...]\n\n # input_context, context_lens, outputs, output_lens\n context_utts, context_lens, out_utts, out_lens, out_topics = [], [], [], [], []\n for context, response, topic in rows:\n context_utts.append([self.pad_to(sen) for sen in context])\n context_lens.append(len(context))\n\n out_utt = self.pad_to(response, do_pad=False)\n out_utts.append(out_utt)\n out_lens.append(len(out_utt))\n\n out_topics.append(topic)\n\n vec_context_lens = np.array(context_lens)\n vec_contexts = np.zeros((self.batch_size, np.max(vec_context_lens), self.max_utt_size), dtype=np.int32)\n vec_out_lens = np.array(out_lens)\n vec_outs = np.zeros((self.batch_size, np.max(out_lens)), dtype=np.int32)\n vec_out_topics = np.array(out_topics)\n\n for b_id in range(self.batch_size):\n vec_outs[b_id, 0:vec_out_lens[b_id]] = out_utts[b_id]\n vec_contexts[b_id, 0:vec_context_lens[b_id], :] = np.array(context_utts[b_id])\n\n return vec_contexts, vec_context_lens, vec_outs, vec_out_lens, vec_out_topics\n","sub_path":"data_apis/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"342124035","text":"import re\n\nclass PhysicalInfo(object):\n \n def set_date(self, date): \n if not isinstance(date, str):\n raise ValueError(\"date should be a string\")\n t = date.split(\"-\")\n if len(t) != 3:\n raise ValueError(\"date should be in MM-DD-YYYY format\")\n if re.search(r'[^0-9\\-]', date):\n raise ValueError(\"date should contain only numbers and -\")\n \n year = int(t[2])\n if year < 1900 or year > 2100:\n raise ValueError(\"invalid year {0}\".format(year))\n is_leap = year % 4 == 0 and (year % 400 == 0 or year % 100 != 0)\n \n month = int(t[0])\n if month < 1 or month > 12:\n raise 
ValueError(\"invalid month {0}\".format(month))\n day_limit = 31\n if month in [4, 6, 7, 9, 11]:\n day_limit = 30\n elif month == 2:\n if is_leap:\n day_limit = 29\n else:\n day_limit = 28\n \n day = int(t[1])\n if day < 1 or day > day_limit:\n raise ValueError(\"invalid day {0}\".format(day))\n \n self.date = date\n","sub_path":"content/Coverage Criteria/code-snippets-2-fytd/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"607305821","text":"import sys\nimport os\nsys.path.append(\"../\")\nsys.path.append(\"flask_egg\")\nsys.path.append(\"flask\")\nsys.path.append(\"itsdangerous.egg-info\")\nfrom flask import Flask, render_template, request, url_for, send_from_directory\nfrom interface_server_db import *\nimport traceback\nimport time\n\n\napplication = Flask(__name__)\n\n@application.route(\"/\")\ndef hello():\n tuples = create_tuples(\"full_name\", \"party_name\")\n data = \"\"\n if tuples[1] == 0:\n data = {\"users_tuples\": create_tuples(\"full_name\", \"party_name\")[0],\n \"select_names\": [\"screen_name_1\", \"screen_name_2\"], \"popular_searches\": get_popular_searches()[0]}\n return render_template('index.html', **data)\n\n@application.route('/static/')\ndef send_static(path):\n return send_from_directory('static', path)\n\n@application.route(\"/bottom\", methods = ['GET', 'POST'])\ndef bottom():\n screen_name_1 = str(request.form['screen_name_1'])\n screen_name_2 = str(request.form['screen_name_2'])\n if screen_name_1 == screen_name_2 == 'disabled':\n return \"\"\n friendship = get_friendship(screen_name_1, screen_name_2)\n if friendship[1] == 0:\n return friendship[0]\n else:\n return \"ERROR!\" + friendship[0]\n\n@application.route(\"/person\", methods = ['GET', 'POST'])\ndef person():\n if str(request.form['location']) == \"left\":\n screen_name = str(request.form['screen_name_1'])\n return get_user_data(screen_name)\n elif str(request.form['location']) == \"right\":\n screen_name = str(request.form['screen_name_2'])\n return get_user_data(screen_name)\n\n\n@application.route(\"/update_all_users\")\ndef update_all_users():\n update_file = os.path.join(\"static\", \"system\", \"update_status.txt\")\n if os.path.isfile(update_file):\n create_time = str(time.ctime(os.path.getmtime(update_file)))\n return \"\".format(create_time)\n else:\n #update_all_users_backround() # commented out - update takes hours\n return \"\"\n\n\n\n@application.route(\"/top_searches\")\ndef top_searches():\n return get_popular_searches(3)[0] + \"
Click here to update all users\"\n\n\n\n@application.errorhandler(404)\ndef page_not_found(e):\n return render_template('errors/404.html'), 404\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n host = sys.argv[1]\n application.run(host=host,port=consts.TOMCAT_PORT)\n else:\n application.run(port=consts.TOMCAT_PORT)\n\n","sub_path":"Server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"626806938","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nmiddle = []\r\nstandard = []\r\nfor i in range(10,71,3):\r\n a = np.genfromtxt( str(i) + '.csv', skip_header = 1000, delimiter=',', usecols=(3,4))\r\n middle.append(np.mean(a[:,1]))\r\n standard.append(np.std(a[:,1]))\r\nprint(standard)\r\nprint(middle)\r\n \r\nX = np.linspace(10, 70, len(middle))\r\nC = middle\r\nfig,ax = plt.subplots(figsize=(9,9), dpi=100)\r\nax.plot(X, C, color=\"blue\", linewidth=2.5, linestyle=\"-\")\r\nax.plot(X, C, color=\"blue\", linewidth=2.5, linestyle=\"-\")","sub_path":"versuch1/versuch1.py","file_name":"versuch1.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"351182701","text":"#!/usr/bin/env python3\r\r\n# -*- coding: utf-8 -*-\r\r\n\"\"\"\r\r\nCreated on Fri Feb 1 22:27:02 2019\r\r\n\r\r\n@author: HP\r\r\n\"\"\"\r\r\n# importing libraries\r\r\nimport numpy as np\r\r\nimport nltk\r\r\nimport re\r\r\nfrom nltk.tokenize import sent_tokenize, word_tokenize\r\r\nfrom urllib import request\r\r\nfrom nltk.corpus import PlaintextCorpusReader\r\r\nfrom nltk.corpus import stopwords\r\r\n\r\r\nnltk.download('stopwords')\r\r\nfrom nltk.stem.porter import PorterStemmer\r\r\n\r\r\nstemmer = PorterStemmer()\r\r\nstop_words = set(stopwords.words('english'))\r\r\nfrom nltk.stem import WordNetLemmatizer\r\r\n\r\r\nlemmatizer = WordNetLemmatizer()\r\r\nfrom sklearn.model_selection import cross_val_score\r\r\n\r\r\n\r\r\n# Data preparation and preprocess\r\r\ndef custom_preprocessor(text):\r\r\n print(\"hiiii\")\r\r\n print('inside first if')\r\r\n text = re.sub(r'\\W+|\\d+|_', ' ', text) # removing numbers and punctuations\r\r\n text = re.sub(r'\\s+', ' ', text) # remove multiple spaces into a single space\r\r\n text = re.sub(r\"\\s+[a-zA-Z]\\s+\", ' ', text) # remove a single character\r\r\n text = text.lower()\r\r\n text = nltk.word_tokenize(text) # tokenizing\r\r\n text = [word for word in text if not word in stop_words] # English Stopwords\r\r\n text = [lemmatizer.lemmatize(word) for word in text] # Lemmatising\r\r\n return text\r\r\n\r\r\n\r\r\nfilepath_dict = {'Book1': 'https://www.gutenberg.org/files/58764/58764-0.txt',\r\r\n 'Book2': 'https://www.gutenberg.org/files/58751/58751-0.txt',\r\r\n 'Book3': 'http://www.gutenberg.org/cache/epub/345/pg345.txt'}\r\r\n\r\r\nfor key, value in filepath_dict.items():\r\r\n if (key == \"Book1\"):\r\r\n bookLoc = filepath_dict[key]\r\r\n response = request.urlopen(bookLoc)\r\r\n raw = response.read().decode('utf-8')\r\r\n len(raw)\r\r\n first_book = custom_preprocessor(raw)\r\r\n len(first_book)\r\r\n elif (key == \"Book2\"):\r\r\n bookLoc = filepath_dict[key]\r\r\n response = request.urlopen(bookLoc)\r\r\n raw = response.read().decode('utf-8')\r\r\n len(raw)\r\r\n second_book = custom_preprocessor(raw)\r\r\n elif (key == \"Book3\"):\r\r\n bookLoc = filepath_dict[key]\r\r\n response = request.urlopen(bookLoc)\r\r\n raw = 
response.read().decode('utf-8')\r\r\n len(raw)\r\r\n third_book = custom_preprocessor(raw)\r\r\n else:\r\r\n pass\r\r\n\r\r\n# Building First Book\r\r\nfirst_book_text = ' '.join(first_book)\r\r\nfileLoc = 'C:\\\\Users\\\\Hp\\\\EBC7100\\\\FirstBook\\\\a.txt'\r\r\nwith open(fileLoc, 'a', encoding=\"utf-8\") as fout:\r\r\n fout.write(first_book_text)\r\r\n\r\r\n# Building Second Book\r\r\nsecond_book_text = ' '.join(second_book)\r\r\nfileLoc = 'C:\\\\Users\\\\Hp\\\\EBC7100\\\\SecondBook\\\\b.txt'\r\r\nwith open(fileLoc, 'a', encoding=\"utf-8\") as fout:\r\r\n fout.write(second_book_text)\r\r\n\r\r\n# Building Third Book\r\r\nthird_book_text = ' '.join(third_book)\r\r\nfileLoc = 'C:\\\\Users\\\\Hp\\\\EBC7100\\\\ThirdBook\\\\c.txt'\r\r\nwith open(fileLoc, 'a', encoding=\"utf-8\") as fout:\r\r\n fout.write(third_book_text)\r\r\n\r\r\n\r\r\n# labeling\r\r\n# Cretaing tuple\r\r\n\r\r\n# aBooklist = []\r\r\n\r\r\ndef readAtxtfile(bookText, docs, labels):\r\r\n x = 0\r\r\n i = 0\r\r\n n = 150\r\r\n while x < 200:\r\r\n temp = \"\"\r\r\n words = bookText.split(\" \")[i:n]\r\r\n # print (\"----->\",words)\r\r\n for word in words:\r\r\n temp = word + \" \" + temp\r\r\n # temp = temp + ',a'\r\r\n # global aBooklist\r\r\n docs.append(temp)\r\r\n labels.append(0)\r\r\n i += 150\r\r\n n += 150\r\r\n x += 1\r\r\n\r\r\n return docs, labels\r\r\n\r\r\n\r\r\n# Cretaing tuple\r\r\n# bBooklist = []\r\r\n\r\r\n\r\r\ndef readBtxtfile(bookText, docs, labels):\r\r\n x = 0\r\r\n i = 0\r\r\n n = 150\r\r\n while x < 184:\r\r\n temp = \"\"\r\r\n words = bookText.split(\" \")[i:n]\r\r\n # print (\"----->\",words)\r\r\n for word in words:\r\r\n temp = word + \" \" + temp\r\r\n # temp = temp + ',b'\r\r\n # print (s)\r\r\n # global bBooklist\r\r\n # bBooklist.append(temp)\r\r\n docs.append(temp)\r\r\n labels.append(1)\r\r\n i += 150\r\r\n n += 150\r\r\n x += 1\r\r\n\r\r\n return docs, labels\r\r\n\r\r\n\r\r\n# Cretaing tuple\r\r\n# cBooklist = []\r\r\n\r\r\ndef readCtxtfile(bookText, docs, labels):\r\r\n x = 0\r\r\n i = 0\r\r\n n = 150\r\r\n while x < 200:\r\r\n temp = \"\"\r\r\n words = bookText.split(\" \")[i:n]\r\r\n # print (\"----->\",words)\r\r\n for word in words:\r\r\n temp = word + \" \" + temp\r\r\n # temp = temp + ',c'\r\r\n # print (s)\r\r\n # global cBooklist\r\r\n # cBooklist.append(temp)\r\r\n docs.append(temp)\r\r\n labels.append(2)\r\r\n i += 150\r\r\n n += 150\r\r\n x += 1\r\r\n\r\r\n return docs, labels\r\r\n\r\r\n\r\r\ndocs = []\r\r\nlabels = []\r\r\ndocs, labels = readAtxtfile(first_book_text, docs, labels)\r\r\n# print(aBooklist)\r\r\ndocs, labels = readBtxtfile(second_book_text, docs, labels)\r\r\n# print(bBooklist)\r\r\ndocs, labels = readCtxtfile(third_book_text, docs, labels)\r\r\n# print(cBooklist)\r\r\n\r\r\nprint(len(docs))\r\r\nprint(docs)\r\nprint(labels)\r\nprint(len(labels))\r\r\n\r\r\n# Data transformation TF-IDF\r\n# Creating the TF-IDF model\r\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nvectorizer = TfidfVectorizer(max_features = 8000, min_df = 3, max_df = 0.6)\r\nTF_X = vectorizer.fit_transform(docs)\r\nTF_X.toarray()\r\n\r\r\n# Splitting the dataset into the Training set and Test set\r\r\nfrom sklearn.model_selection import train_test_split\r\r\n\rx_train, x_val, y_train, y_val = train_test_split(TF_X, labels, test_size=0.20, random_state=42, shuffle=True)\r\r\n\r\r\n# fitting the model into machine learning algorithm\r\r\n# from nltk.classify.scikitlearn import SklearnClassifier\r\r\nfrom sklearn.model_selection import cross_validate\r\r\n# Training first 
classifier\r\nfrom sklearn.svm import SVC\r\r\r\nclf = SVC(kernel='linear', C=0.70, gamma='auto',random_state=42)\r\r\nscores = cross_val_score(clf, x_train, y_train, cv=10)\r\r\nprint(scores)\r\nprint(\"Accuracy: {} (+/- {})\".format(scores.mean(), scores.std() * 2))\r\r\nfrom sklearn.model_selection import StratifiedShuffleSplit\r\nssf = StratifiedShuffleSplit(n_splits=10, test_size=0.20, random_state=42)\r\nclf = SVC(kernel='rbf', C=1.0, gamma='auto', random_state=21)\r\n#****************End of training of first classifier******************#\r\n\r\n#Training Second classifier #\r\nfrom sklearn import tree\r\nclf = tree.DecisionTreeClassifier(criterion = 'gini', random_state = 42)\r\nscores = cross_val_score(clf,x_train, y_train, cv=10)\r\nprint(\"Accuracy: {} (+/- {})\".format(scores.mean(), scores.std() * 2))\r\nfrom sklearn.model_selection import StratifiedShuffleSplit\r\nssf = StratifiedShuffleSplit(n_splits=10, test_size=0.20, random_state=42)\r\nclf = tree.DecisionTreeClassifier(criterion = 'gini', random_state = 42)\r\n#****************End of training of second classifier******************#\r\n\r\n#Training third classifier # k-Nearest Neighbor\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nclf = KNeighborsClassifier(n_neighbors = 5, metric= 'minkowski', p = 2 )\r\nscores = cross_val_score(clf, x_train, y_train, cv=10)\r\nprint(\"Accuracy: {} (+/- {})\".format(scores.mean(), scores.std() * 2))\r\n# manual cross validation with shuffle\r\nfrom sklearn.model_selection import StratifiedShuffleSplit\r\nssf = StratifiedShuffleSplit(n_splits=10, test_size=0.20, random_state=42)\r\nclf = KNeighborsClassifier(n_neighbors = 5, metric= 'minkowski', p = 2 )\r\n#****************End of training of Third classifier******************#\r\n\r\nnew_scores = []\r\nX_array = TF_X.toarray()\r\nlabels = np.asarray(labels)\r\nfrom sklearn.metrics import accuracy_score\r\nfor train_index, val_index in ssf.split(X_array, labels):\r\n x_train, y_train = X_array[train_index], labels[train_index]\r\n x_val, y_val = X_array[val_index], labels[val_index]\r\n clf.fit(x_train, y_train)\r\n prediction_scores = clf.predict(x_val)\r\n print(accuracy_score(y_val, prediction_scores))\r\n new_scores.append(accuracy_score(y_val, prediction_scores))\r\n\r\nprint(np.mean(new_scores))\r\n\r\n#*******Confusion Matrix for error analysis/performance analysis\r\n\r\n# confusion_matrix\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_val, prediction_scores)\r\nprint(cm)\r\n\r\n#********End of Confusion Matrix**********************************\r\n\r\n\r\n\r\n\r\r\n\r\r\n'''\r\r\ncv = cross_validation.KFold(len(train_set), n_folds=10, shuffle=False, random_state=0)\r\r\nfor traincv, testcv in cv:\r\r\n\tclassifier = nltk.SklearnClassifier(SVC(kernel = 'rbf', random_state = 0)).train(train_set[traincv[0]:traincv[len(traincv)-1]])\r\r\n\tprint ('accuracy:', nltk.classify.util.accuracy(classifier, train_set[testcv[0]:testcv[len(testcv)-1]]))\r\r\n\r\ncv = cross_val_score.KFold(len(train_set), n_folds=10, shuffle=False, random_state=None)\r\r\nfor traincv, testcv in cv:\r\r\n\tclassifier = nltk.NaiveBayesClassifier.train(train_set[traincv[0]:traincv[len(traincv)-1]])\r\r\n\tprint ('accuracy:', nltk.classify.util.accuracy(classifier, train_set[testcv[0]:testcv[len(testcv)-1]]))\r\r\n\r\n'''\r\n","sub_path":"Authorship_TF_IDF.py","file_name":"Authorship_TF_IDF.py","file_ext":"py","file_size_in_byte":8844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
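Note on the Authorship_TF_IDF.py record above: it rebinds clf three times, so the manual StratifiedShuffleSplit loop at the end only ever evaluates the last classifier (kNN), and the SVC and decision-tree instances created after their cross_val_score calls are never fitted. A minimal sketch of the intended three-way comparison follows; the toy corpus and labels are invented stand-ins for the TF-IDF matrix built from the three Gutenberg books, and the hyperparameters are copied from the record.

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

# Toy stand-in corpus: two "books" with distinct vocabularies.
docs = ['alpha beta gamma delta'] * 30 + ['epsilon zeta eta theta'] * 30
labels = np.array([0] * 30 + [1] * 30)
X = TfidfVectorizer().fit_transform(docs)

# One named instance per model instead of rebinding a single clf,
# so every classifier goes through the same 5-fold cross-validation.
classifiers = {
    'SVM': SVC(kernel='linear', C=0.70, random_state=42),
    'DecisionTree': DecisionTreeClassifier(criterion='gini', random_state=42),
    'kNN': KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2),
}
for name, clf in classifiers.items():
    scores = cross_val_score(clf, X, labels, cv=5)
    print('{}: {:.3f} (+/- {:.3f})'.format(name, scores.mean(), scores.std() * 2))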
+{"seq_id":"72937610","text":"from django.urls import path\nfrom .views import *\n\napp_name = \"profiles\"\n\nurlpatterns = [\n path('create/', ProfileCreateView.as_view(), name=\"create\"),\n path('invite//', InviteView.as_view(), name=\"invite\"),\n path('/', ProfileDetailView.as_view(), name=\"detail\"),\n path('/update/', ProfileUpdateView.as_view(), name=\"update\"),\n path('activate/', ProfileActivationView.as_view(), name=\"activate\")\n]\n","sub_path":"webapp/profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"228174022","text":"\"\"\"Exercise 2.5:\nWrite a function roots that computes the roots of a quadratic equation. Check for complex roots and print an \nerror message saying that the roots are complex.\"\"\"\n\nimport math\n\ndef findRoot(a,b,c):\n #finds discriminant\n discrim = (b ** 2) - (4 * a * c)\n\n if discrim < 0:\n print(\"Roots are complex\")\n return None\n else:\n root1 = (b + math.sqrt(discrim)) / (2 * a)\n root2 = (b - math.sqrt(discrim)) / (2 * a)\n print(\"Root 1 is:\", root1, \"Root2 is:\", root2)\n return (root1,root2)\n","sub_path":"2.5_quadratic_formula.py","file_name":"2.5_quadratic_formula.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"456813319","text":"class Solution(object):\n def minWindow(self, s, t):\n t_dict = {}\n for letter in t:\n t_dict[letter] = t_dict.get(letter, 0) + 1\n s_dict = {}\n i = 0\n j = 0\n required = len(t_dict)\n formed = 0\n ans = float(\"inf\"), None, None\n while j < len(s): \n character = s[j]\n s_dict[character] = s_dict.get(character, 0) + 1\n if character in t_dict and s_dict[character] == t_dict[character]:\n formed += 1\n while i <= j and formed == required:\n if j - i + 1 < ans[0]:\n ans = j - i + 1, i, j \n character = s[i]\n s_dict[character] -= 1\n if character in t_dict and s_dict[character] < t_dict[character]:\n formed -= 1\n i += 1\n j += 1\n if ans[0] != float(\"inf\"):\n return s[ans[1]:ans[2] + 1]\n else:\n return \"\"","sub_path":"hard-collection/array-and-strings/minimum-window-substring.py","file_name":"minimum-window-substring.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"125243006","text":"#!/apollo/sbin/envroot \"$ENVROOT/python3.6/bin/python3.6\"\n'''\nAuthor: Jason Guo (jasguo@amazon.com) and Edi Wibowo (ewi@amazon.com)\nDate: 17 Aug 2020\n\nThis script is to find available interfaces on vc-edg routers. 
\nAvailable interfaces are free and not oversubscribed.\n\nThe calculation rule of subscription:\n- FPC \"MPC 3D 16x 10GE\" can have a maximum of 16 available interfaces \nwith each PIC having a maximum of 3 available interfaces\n- FPC \"MPC4E-3D-32XGE-SFPP\" can have a maximum of 16 available interfaces when redundancy is enabled \nwith each PPE (PICs 0-1 or PICs 2-3) having a maximum of 13 available interfaces\n\n\nFor example:\nINPUT:\nHostnames: iad6-vc-edg-r311,iad6-vc-edg-r312\n\nOUTPUT:\nHostname: iad6-vc-edg-r311\navailable ports:\nxe-0/0/2\nxe-0/2/3\nxe-1/0/3\nxe-1/1/3\nnumber of available ports: 4\n---\nHostname: iad6-vc-edg-r312\navailable ports:\nxe-0/0/2\nxe-0/2/3\nxe-0/3/1\nxe-1/1/2\nxe-1/1/3\nxe-1/2/2\nnumber of available ports: 6\n\n'''\n\nimport re\nimport sys\n\nfrom isd_tools_dev.modules import nsm as nsm_isd\nfrom dxd_tools_dev.modules import nsm as nsm_dxd\n\ndef populate_slot_and_subslot(used_interfaces, fpc_dict, slot_dict, subslot_dict):\n for u in used_interfaces:\n\n found = re.match(\"xe-(\\d+)/(\\d+)/(\\d+)\", u)\n if found:\n slot_num = found.groups()[0]\n subslot_num = found.groups()[1]\n\n if slot_num in slot_dict.keys():\n slot_dict[slot_num] += 1\n else:\n slot_dict[slot_num] = 1\n\n fpc_slot = \"FPC {}\".format(slot_num)\n\n if fpc_dict[fpc_slot] == \"MPC 3D 16x 10GE\":\n slot_subslot_num = \"{}-{}\".format(slot_num, subslot_num)\n else:\n if subslot_num == '0' or subslot_num == '1':\n ppe = 'A'\n else:\n ppe = 'B'\n slot_subslot_num = \"{}-{}\".format(slot_num, ppe)\n\n if slot_subslot_num in subslot_dict.keys():\n subslot_dict[slot_subslot_num] += 1\n else:\n subslot_dict[slot_subslot_num] = 1\n\ndef is_port_available(slot_num, subslot_num, slot_dict, subslot_dict):\n slot_available = False\n subslot_available = False\n\n fpc_slot = \"FPC {}\".format(slot_num)\n\n if fpc_dict[fpc_slot] == \"MPC 3D 16x 10GE\":\n if slot_dict.get(slot_num) != None and slot_dict.get(slot_num) < 16:\n slot_available = True\n \n slot_subslot_num = \"{}-{}\".format(slot_num,subslot_num)\n \n if subslot_dict.get(slot_subslot_num) != None and subslot_dict.get(slot_subslot_num) < 3:\n subslot_available = True\n else:\n if slot_dict.get(slot_num) == None or slot_dict.get(slot_num) < 24:\n slot_available = True\n\n if subslot_num == '0' or subslot_num == '1':\n ppe = 'A'\n else:\n ppe = 'B'\n \n slot_subslot_num = \"{}-{}\".format(slot_num,ppe)\n \n if subslot_dict.get(slot_subslot_num) == None or subslot_dict.get(slot_subslot_num) < 13:\n subslot_available = True\n\n if slot_available and subslot_available:\n if slot_num in slot_dict.keys():\n slot_dict[slot_num] += 1\n else:\n slot_dict[slot_num] = 1\n \n if slot_subslot_num in subslot_dict.keys():\n subslot_dict[slot_subslot_num] += 1\n else:\n subslot_dict[slot_subslot_num] = 1\n \n return True\n \n return False\n\ndef get_interface_lists(nsm_isd_raw):\n free_interfaces = list()\n excluded_interfaces = list()\n used_interfaces = list()\n \n for interface in nsm_isd_raw:\n if interface['class'] == 'physical' and 'xe-' in interface['name']:\n if \"aggregation\" in interface.keys() and interface['capacity_status'] == 'free':\n used_interfaces.append(interface['name'])\n elif interface['status'] == \"down\" and interface['interface_description'] == '' and interface['capacity_status'] == \"free\":\n free_interfaces.append(interface['name'])\n else:\n used_interfaces.append(interface['name'])\n \n # Ports that are excluded from subscription calculation\n if 'ae9' in interface['interface_description']:\n 
excluded_interfaces.append(interface['name'])\n\n return free_interfaces,used_interfaces,excluded_interfaces\n\ndef sort_key(f):\n slot_num = 0\n subslot_num = 0\n port_num = 0\n found = re.match(\"xe-(\\d+)/(\\d+)/(\\d+)\", f)\n if found:\n slot_num = found.groups()[0]\n subslot_num = found.groups()[1]\n port_num = found.groups()[2]\n\n return int(slot_num), int(subslot_num), int(port_num)\n\ndef get_available_interfaces(free_interfaces, slot_dict, subslot_dict):\n available_interfaces = []\n\n free_interfaces.sort(key=sort_key)\n\n for f in free_interfaces:\n found = re.match(\"xe-(\\d+)/(\\d+)/(\\d+)\", f)\n if found:\n slot_num = found.groups()[0]\n subslot_num = found.groups()[1]\n if is_port_available(slot_num, subslot_num, slot_dict, subslot_dict):\n available_interfaces.append(f)\n\n return available_interfaces\n\ndef get_new_interfaces():\n print(fpc_dict)\n new_linecard = input(\"Slots of new line cards (mutiple linecards can be added with comma delimiter or Press Enter to skip): \")\n \n new_interfaces = []\n fpc = new_linecard.split(',')\n\n if len(fpc) == 0:\n return new_interfaces\n\n pic =[x for x in range(4)]\n ports = [x for x in range(8)]\n \n for port in ports:\n for p in pic:\n for f in fpc:\n intf = \"xe-{}/{}/{}\".format(f,p,port)\n new_interfaces.append(intf)\n\n '''\n print(\"new interfaces:\")\n for n in new_interfaces:\n print(n)\n '''\n\n slot_dict = {}\n subslot_dict = {}\n\n # Allocate ports from the new ports\n allocated_interfaces = []\n number_required_ports = 100\n #number_required_ports = int(input(\"Number of required ports: \"))\n\n counter = 0\n for n in new_interfaces:\n slot_available = False\n subslot_available = False\n \n found = re.match(\"xe-(\\d+)/(\\d+)/(\\d+)\", n.strip())\n\n if found:\n \n slot_num = found.groups()[0]\n subslot_num = found.groups()[1]\n\n if slot_dict.get(slot_num) == None or slot_dict.get(slot_num) < 16:\n slot_available = True\n\n if subslot_num == '0' or subslot_num == '1':\n ppe = 'A'\n else:\n ppe = 'B'\n\n slot_subslot_num = \"{}-{}\".format(slot_num,ppe)\n \n if subslot_dict.get(slot_subslot_num) == None or subslot_dict.get(slot_subslot_num) < 13:\n subslot_available = True\n\n if slot_available and subslot_available:\n if slot_num in slot_dict.keys():\n slot_dict[slot_num] += 1\n else:\n slot_dict[slot_num] = 1\n if slot_subslot_num in subslot_dict.keys():\n subslot_dict[slot_subslot_num] += 1\n else:\n subslot_dict[slot_subslot_num] = 1\n counter += 1\n allocated_interfaces.append(n)\n \n '''\n if counter >= number_required_ports:\n #allocated_interfaces.sort(key=sort_key)\n return allocated_interfaces\n '''\n \n #allocated_interfaces.sort(key=sort_key)\n return allocated_interfaces\n \nif __name__ == '__main__':\n hostnames = input(\"Hostnames (mutiple hostnames can be entered with comma delimiter): \").split(',')\n \n for hostname in hostnames:\n print(\"---\")\n print(\"Hostname: {}\".format(hostname))\n int_nsm_isd = nsm_isd.get_raw_device(hostname)['interfaces']\n \n free_interfaces, used_interfaces, excluded_interfaces = get_interface_lists(int_nsm_isd)\n \n free_interfaces.sort()\n used_interfaces.sort()\n excluded_interfaces.sort()\n\n '''\n print(\"free interfaces:\")\n for f in free_interfaces:\n print(f)\n\n print(\"used interfaces:\")\n for u in used_interfaces:\n print(u)\n\n print(\"excluded interfaces:\")\n for e in excluded_interfaces:\n print(e)\n '''\n\n # Remove excluded interfaces before calculating the interface subscription\n used_interfaces_set = set(used_interfaces)\n used_interfaces_set = 
used_interfaces_set.difference(excluded_interfaces)\n used_interfaces = list(used_interfaces_set)\n\n hard = nsm_dxd.get_device_hardware_from_nsm(hostname)\n fpc_dict = hard['FPC']\n \n slot_dict = {}\n subslot_dict = {}\n\n populate_slot_and_subslot(used_interfaces, fpc_dict, slot_dict, subslot_dict)\n\n # Get available ports which are not oversubscribed based on slot and subslot availability\n available_interfaces = get_available_interfaces(free_interfaces, slot_dict, subslot_dict)\n\n print(\"available interfaces:\")\n for a in available_interfaces:\n print(a)\n print(\"number of available interfaces: {}\".format(len(available_interfaces)))\n\n new_interfaces = get_new_interfaces()\n \n new_interfaces.sort(key=sort_key)\n\n print(\"new interfaces:\")\n for n in new_interfaces:\n print(n)\n","sub_path":"aws/vc_edg_get_available_intf.py","file_name":"vc_edg_get_available_intf.py","file_ext":"py","file_size_in_byte":9465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"547456065","text":"import requests\n\n\nclass ResourceHelper(object):\n \"\"\"\n Class for helper methods for making requests against the Datadog API. A new instance should\n be instantiated within the action/trigger being developed. New methods should be\n created as instance methods to allow reference of the logger and session passed to\n the __init__ function during instantiation.\n \"\"\"\n\n _ERRORS = {\n 400: \"Bad Request\",\n 401: \"Unauthorized\",\n 404: \"Not Found\",\n 500: \"Internal Server Error\",\n 503: \"Service Unavailable\",\n 000: \"Unknown Status Code\",\n }\n\n def __init__(self, session, logger):\n \"\"\"\n Creates a new instance of ResourceHelper\n :param session: Session object available to Komand actions/triggers, usually self.connection.session\n :param logger: Logger object available to Komand actions/triggers, usually self.logger\n :return: ResourceHelper object\n \"\"\"\n self.logger = logger\n self.session = session\n\n def resource_request(self, endpoint: str, method: str = \"get\", params: dict = None, payload: dict = None) -> dict:\n \"\"\"\n Sends a request to the API with the provided endpoint and optional method/payload\n :param endpoint: Endpoint for the API call defined in endpoints.py\n :param method: HTTP method for the API request\n :param params: URL parameters to append to the request\n :param payload: JSON body for the API request if required\n :return: Dict containing the JSON response body\n \"\"\"\n try:\n request_method = getattr(self.session, method.lower())\n\n if not params:\n params = {}\n if not payload:\n response = request_method(url=endpoint, params=params, verify=False)\n else:\n response = request_method(url=endpoint, params=params, json=payload, verify=False)\n except requests.RequestException as e:\n self.logger.error(e)\n raise\n\n if response.status_code in range(200, 300):\n resource = response.json()\n return {\"resource\": resource, \"status\": response.status_code}\n else:\n try:\n error = response.json()\n except ValueError:\n error = \"Unknown error occurred. 
Please contact support or try again later.\"\n\n status_code_message = self._ERRORS.get(response.status_code, self._ERRORS[000])\n self.logger.error(f\"{status_code_message} ({response.status_code}): {error}\")\n raise Exception(f\"Datadog returned a status code of {response.status_code}: {status_code_message}\")\n","sub_path":"plugins/datadog/komand_datadog/util/resource_helper.py","file_name":"resource_helper.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"474234984","text":"def select_sort(lst):\n if len(lst) <= 1:\n return lst\n\n for i in range(len(lst)-1, 0, -1):\n max_value = lst[0]\n index = 0\n for j in range(0, i+1):\n if lst[j] > max_value:\n index = j\n max_value = lst[j]\n lst[index], lst[i] = lst[i], lst[index]\n return lst\n\nlst = [2, 3, 1, 5, 4]\nres = select_sort(lst)\nprint(res)","sub_path":"dir_sort/select_sort.py","file_name":"select_sort.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"412148676","text":"#!/usr/bin/env python3\n\nclass SafeDict(dict):\n \"\"\"Safe formatter\n Keeps keys without a supplied value untouched\n \"\"\"\n def __missing__(self, key):\n return '{' + key + '}'\n\ndef sampleFormat(text, **args):\n return text.format_map(SafeDict(args))\n\ndef makeStatusLine(statusCode, customReasonPhrase=None):\n \"\"\"Generate Status-Line\n statusCode - SIP status code\n customReasonPhrase - custom reason phrase, overrides the standard one\n \"\"\"\n if customReasonPhrase is None:\n return 'SIP/2.0 ' + {\n '200': '200 OK',\n '400': '400 Bad Request',\n '401': '401 Unauthorized',\n '402': '402 Payment Required',\n '403': '403 Forbidden',\n '404': '404 Not Found',\n '405': '405 Method Not Allowed',\n '406': '406 Not Acceptable',\n '407': '407 Proxy Authentication Required',\n '408': '408 Request Timeout',\n '410': '410 Gone',\n '413': '413 Request Entity Too Large',\n '414': '414 Request-URI Too Large',\n '415': '415 Unsupported Media Type',\n '416': '416 Unsupported URI Scheme',\n '420': '420 Bad Extension',\n '421': '421 Extension Required',\n '423': '423 Interval Too Brief',\n '480': '480 Temporarily not available',\n '481': '481 Call Leg/Transaction Does Not Exist',\n '482': '482 Loop Detected',\n '483': '483 Too Many Hops',\n '484': '484 Address Incomplete',\n '485': '485 Ambiguous',\n '486': '486 Busy Here',\n '487': '487 Request Terminated',\n '488': '488 Not Acceptable Here',\n '491': '491 Request Pending',\n '493': '493 Undecipherable',\n '500': '500 Internal Server Error',\n '501': '501 Not Implemented',\n '502': '502 Bad Gateway',\n '503': '503 Service Unavailable',\n '504': '504 Server Time-out',\n '505': '505 SIP Version not supported',\n '513': '513 Message Too Large',\n '600': '600 Busy Everywhere',\n '603': '603 Decline',\n '604': '604 Does not exist anywhere',\n '606': '606 Not Acceptable'\n }.get(str(statusCode), '{} Unknown Status Code'.format(statusCode))\n else:\n return 'SIP/2.0 {} {}'.format(statusCode, customReasonPhrase)\n\ndef getReferencesList(refs):\n return ''.format(','.join(refs))\n\ndef needFormat(s):\n try:\n s.format()\n except (IndexError, KeyError, ValueError):\n return True\n else:\n return False\n\nclass Xml():\n\n msgScenario = \\\n '''\n \n {initial}\n {final}\n \n '''\n\n msgSendLast = \\\n '''\n \n \n \n '''\n\n msgInitial = \\\n '''\n {initial}\n \n '''\n\n msgFinal = \\\n '''\n {final}\n \n
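Usage sketch for the SafeDict/format_map pattern in the last record: keys that are not supplied survive as literal {key} placeholders, so a template can be filled in several passes. The SIP request line below is an invented example, not taken from the record.

class SafeDict(dict):
    def __missing__(self, key):
        return '{' + key + '}'

def sampleFormat(text, **args):
    return text.format_map(SafeDict(args))

# First pass fills only what is known; {domain} survives untouched.
line = sampleFormat('INVITE sip:{user}@{domain} SIP/2.0', user='alice')
print(line)  # INVITE sip:alice@{domain} SIP/2.0

# A later pass completes the template.
print(sampleFormat(line, domain='example.com'))  # INVITE sip:alice@example.com SIP/2.0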